text
stringlengths 681
1.05M
| score
float64 0
0.27
|
---|---|
"""
Test the session-flushing middleware
"""
import unittest
from django.conf import settings
from django.test import Client
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestSessionFlushMiddleware(unittest.TestCase):
    """
    Ensure that if the pipeline is exited when it's been quarantined,
    the entire session is flushed.
    """

    def _client_with_session(self, **entries):
        # Build a test client whose session holds exactly the given entries.
        client = Client()
        session = client.session
        for key, value in entries.items():
            session[key] = value
        session.save()
        return client

    def test_session_flush(self):
        """
        Test that a quarantined session is flushed when navigating elsewhere
        """
        client = self._client_with_session(
            fancy_variable=13025,
            partial_pipeline='pipeline_running',
            third_party_auth_quarantined_modules=('fake_quarantined_module',),
        )
        client.get('/')
        self.assertEqual(client.session.get('fancy_variable', None), None)

    def test_session_no_running_pipeline(self):
        """
        Test that a quarantined session without a running pipeline is not flushed
        """
        client = self._client_with_session(
            fancy_variable=13025,
            third_party_auth_quarantined_modules=('fake_quarantined_module',),
        )
        client.get('/')
        self.assertEqual(client.session.get('fancy_variable', None), 13025)

    def test_session_no_quarantine(self):
        """
        Test that a session with a running pipeline but no quarantine is not flushed
        """
        client = self._client_with_session(
            fancy_variable=13025,
            partial_pipeline='pipeline_running',
        )
        client.get('/')
        self.assertEqual(client.session.get('fancy_variable', None), 13025)
| 0.002798 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add ip_allocation to port """
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '5cd92597d11d'
# revision this migration applies on top of
down_revision = '6b461a21bcfc'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.NEWTON]
def upgrade():
    """Add the nullable ``ip_allocation`` string column to the ports table."""
    ip_allocation = sa.Column('ip_allocation', sa.String(length=16), nullable=True)
    op.add_column('ports', ip_allocation)
| 0 |
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved
# Django
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from django.contrib.auth.models import User
class UpdatePassword(object):
    """Set a new password for an existing user, reporting whether it changed."""

    def update_password(self, username, password):
        """Return True if the password was changed, False if it already matched.

        Raises RuntimeError when no user with ``username`` exists.
        """
        changed = False
        # BUG FIX: Model.objects.get() raises DoesNotExist rather than
        # returning a falsy value, so the old `if not u:` check was dead code
        # and a missing user surfaced as an unhandled DoesNotExist.
        try:
            u = User.objects.get(username=username)
        except User.DoesNotExist:
            raise RuntimeError("User not found")
        if not u.check_password(password):
            u.set_password(password)
            u.save()
            changed = True
        return changed
class Command(BaseCommand):
    """Management command: update the password of an existing user."""

    def add_arguments(self, parser):
        parser.add_argument('--username', dest='username', action='store', type=str,
                            default=None, help='username to change the password for')
        parser.add_argument('--password', dest='password', action='store', type=str,
                            default=None, help='new password for user')

    def handle(self, *args, **options):
        username = options['username']
        password = options['password']
        if not username:
            raise CommandError('username required')
        if not password:
            raise CommandError('password required')
        updated = UpdatePassword().update_password(username, password)
        return "Password updated" if updated else "Password not updated"
| 0.001407 |
import numpy as np
from petsc4py import PETSc
from src.geo import *
from src import stokes_flow as sf
from src.support_class import *
from src.StokesFlowMethod import *
# Explicit public API of this geometry-factory module.
__all__ = ['createEcoli_ellipse', 'createEcoliComp_ellipse', 'createEcoli_2tails',
           'createEcoliComp_tunnel', 'createEcoli_tunnel', 'create_ecoli_dualTail',
           'create_ecoli_2part', 'create_ecoli_tail', 'create_ecoli_tail_at',
           'create_rotlets_tail_2part', 'create_selfRepeat_tail',
           'create_ecoli_2part_at', 'create_ecoli_dualTail_at',
           'get_tail_nodes_split_at', 'get_ecoli_nodes_split_at',
           'create_diskVane_tail',
           'create_capsule',
           'create_rod',
           'create_infHelix',
           'create_helicoid_list', 'create_helicoid_comp',
           'creat_helicoid_dumb', 'creat_helicoid_dumb_selfRotate',
           'obj2helicoid_list', 'obj2helicoid_list_v2', 'obj2helicoid_list_v3',
           'obj2helicoid_comp', 'obj2helicoid_list_selfRotate',
           'create_sphere', 'create_move_single_sphere', 'create_one_ellipse']
def create_capsule(rs1, rs2, ls, ds, node_dof=3):
    """Build a capsule velocity geometry: two half-ellipsoid caps joined,
    when long enough, by a cylindrical tunnel section of length ls - 2*rs2.
    """
    cyl_len = ls - 2 * rs2
    theta_step = ds / rs2
    assert cyl_len >= 0, 'geo parameter of create_capsule head is wrong. '

    capsule = base_geo()
    capsule.set_dof(node_dof)

    cap_top = ellipse_base_geo()  # velocity node geo of head
    cap_top.create_half_delta(ds, rs1, rs2)
    cap_bottom = cap_top.copy()
    # Orient the top cap and shift it to the upper end of the cylinder.
    cap_top.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
    cap_top.node_rotation(norm=np.array((0, 0, 1)), theta=-np.pi / 2)
    cap_top.move((0, 0, +cyl_len / 2))
    # Mirror the bottom cap and shift it to the lower end.
    cap_bottom.node_rotation(norm=np.array((0, 1, 0)), theta=+np.pi / 2)
    cap_bottom.node_rotation(norm=np.array((0, 0, 1)), theta=+np.pi / 2 - theta_step)
    cap_bottom.move((0, 0, -cyl_len / 2))
    cap_bottom.set_nodes(np.flipud(cap_bottom.get_nodes()),
                         deltalength=cap_bottom.get_deltaLength())

    if cyl_len > ds:
        side_tube = tunnel_geo()
        side_tube.create_deltatheta(dth=theta_step, radius=rs2, length=cyl_len)
        capsule.combine([cap_top, side_tube, cap_bottom])
    else:
        # Cylinder shorter than one node spacing: caps only.
        capsule.combine([cap_top, cap_bottom])
    return capsule
def create_ecoli_tail(moveh, **kwargs):
    """Create n_tail helical tail objects, evenly rotated about the z axis.

    moveh is the translation applied to the helix template (scaled by
    zoom_factor).  Returns a uniqueList of tail objects named 'helix_%d'.
    """
    nth, hfct = kwargs['nth'], kwargs['hfct']
    eh, ch = kwargs['eh'], kwargs['ch']
    rh11, rh12 = kwargs['rh11'], kwargs['rh12']
    rh2, ph = kwargs['rh2'], kwargs['ph']
    n_tail = kwargs['n_tail']
    with_cover = kwargs['with_cover']
    with_T_geo = kwargs.get('with_T_geo', 0)
    left_hand = kwargs['left_hand']
    rT2 = kwargs['rT2']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    zoom_factor = kwargs['zoom_factor']

    # Template helix object; every tail is a rotated copy of it.
    helix_obj = sf.obj_dic[matrix_method]()
    helix_ugeo = FatHelix()  # velocity node geo of helix
    if 'dualPotential' in matrix_method:
        helix_ugeo.set_check_epsilon(False)
    helix_ugeo.set_dof(helix_obj.get_n_unknown())
    helix_fgeo = helix_ugeo.create_deltatheta(
        dth=2 * np.pi / nth, radius=rh2, R1=rh11, R2=rh12, B=ph / (2 * np.pi),
        n_c=ch, epsilon=eh, with_cover=with_cover, factor=hfct, left_hand=left_hand)
    helix_obj.set_data(helix_fgeo, helix_ugeo, name='helix_0')
    helix_obj.zoom(zoom_factor)
    if with_T_geo:
        # dbg
        OptDB = PETSc.Options()
        factor = OptDB.getReal('dbg_theta_factor', 1.5)
        PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % factor)
        helix_obj.node_rotation(
            norm=np.array((0, 0, 1)),
            theta=np.pi * ch + (rT2 + rh2 * factor) / (rh11 + rh2))
    helix_obj.move(moveh * zoom_factor)

    tail_list = uniqueList()
    for idx in range(n_tail):
        tail_i = helix_obj.copy()
        tail_i.node_rotation(norm=(0, 0, 1), theta=2 * np.pi / n_tail * idx,
                             rotation_origin=center.copy())
        tail_i.set_name('helix_%d' % idx)
        tail_list.append(tail_i)
    return tail_list
def create_ecoli_tail_bck(moveh, **kwargs):
    """Legacy variant of create_ecoli_tail that always applies the debug
    theta rotation (no ``with_T_geo`` switch).  Kept for reference.
    """
    nth, hfct = kwargs['nth'], kwargs['hfct']
    eh, ch = kwargs['eh'], kwargs['ch']
    rh11, rh12 = kwargs['rh11'], kwargs['rh12']
    rh2, ph = kwargs['rh2'], kwargs['ph']
    n_tail = kwargs['n_tail']
    with_cover = kwargs['with_cover']
    left_hand = kwargs['left_hand']
    rT2 = kwargs['rT2']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    zoom_factor = kwargs['zoom_factor']

    # Template helix object.
    tail0 = sf.obj_dic[matrix_method]()
    ugeo = FatHelix()  # velocity node geo of helix
    if 'dualPotential' in matrix_method:
        ugeo.set_check_epsilon(False)
    ugeo.set_dof(tail0.get_n_unknown())
    fgeo = ugeo.create_deltatheta(
        dth=2 * np.pi / nth, radius=rh2, R1=rh11, R2=rh12, B=ph / (2 * np.pi),
        n_c=ch, epsilon=eh, with_cover=with_cover, factor=hfct, left_hand=left_hand)
    tail0.set_data(fgeo, ugeo, name='helix_0')
    tail0.zoom(zoom_factor)
    # dbg
    OptDB = PETSc.Options()
    factor = OptDB.getReal('dbg_theta_factor', 1.5)
    PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % factor)
    tail0.node_rotation(norm=np.array((0, 0, 1)),
                        theta=np.pi * ch + (rT2 + rh2 * factor) / (rh11 + rh2))
    tail0.move(moveh * zoom_factor)

    tail_list = uniqueList()
    for idx in range(n_tail):
        tail_i = tail0.copy()
        tail_i.node_rotation(norm=(0, 0, 1), theta=2 * np.pi / n_tail * idx,
                             rotation_origin=center.copy())
        tail_i.set_name('helix_%d' % idx)
        tail_list.append(tail_i)
    return tail_list
def create_diskVane_tail(moveh, **kwargs):
    """Build a vane tail made of nr regularized disks per ring, optionally
    stacked nz times along z (each higher ring shifted and twisted by pi).
    """
    r1 = kwargs['diskVane_r1']
    r2 = kwargs['diskVane_r2']
    rz = kwargs['diskVane_rz']
    th_loc = kwargs['diskVane_th_loc']
    # ph_loc = kwargs['diskVane_ph_loc']
    ds = kwargs['diskVane_ds']
    nr = kwargs['diskVane_nr']
    nz = kwargs['diskVane_nz']

    # Template disk: stood upright, rotated to its azimuthal location,
    # shifted out to radius r1 and up by moveh.
    disk0 = regularizeDisk()
    disk0.create_ds(ds, r2)
    disk0.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2,
                        rotation_origin=np.zeros(3))
    disk0.node_rotation(norm=np.array([0, 0, 1]), theta=th_loc,
                        rotation_origin=np.zeros(3))
    disk0.move(np.array((r1, 0, moveh)))

    # One ring: nr copies evenly spaced about the z axis.
    ring = []
    dtheta = 2 * np.pi / nr
    for i0 in range(nr):
        disk_i = disk0.copy()
        disk_i.node_rotation(norm=np.array((0, 0, 1)), theta=dtheta * i0,
                             rotation_origin=np.zeros(3))
        ring.append(disk_i)

    if np.isclose(nz, 1):
        return ring

    # Stack nz rings along z; ring i0 is lifted by tz*i0 and twisted by pi*i0.
    tgeo_list = []
    tz = rz / (nz - 1)
    for i0 in range(nz):
        for base_disk in ring:
            disk_j = base_disk.copy()
            disk_j.move(np.array((0, 0, tz * i0)))
            disk_j.node_rotation(norm=np.array((0, 0, 1)), theta=np.pi * i0,
                                 rotation_origin=np.zeros(3))
            tgeo_list.append(disk_j)
    return tgeo_list
def create_selfRepeat_tail(moveh, **kwargs):
    """Create self-repeating helical tails plus their start/body/end sub-objects.

    Like create_ecoli_tail but uses a SelfRepeat_FatHelix geometry, and for
    each full tail also extracts three part objects (start cap, repeating mid
    body, end cap).

    Returns
    -------
    (tail_list, tail_start_list, tail_body0_list, tail_end_list)
    """
    nth = kwargs['nth']
    hfct = kwargs['hfct']
    eh = kwargs['eh']
    ch = kwargs['ch']
    rh11 = kwargs['rh11']
    rh12 = kwargs['rh12']
    rh2 = kwargs['rh2']
    ph = kwargs['ph']
    n_tail = kwargs['n_tail']
    with_cover = kwargs['with_cover']
    with_T_geo = kwargs['with_T_geo'] if 'with_T_geo' in kwargs.keys() else 0
    left_hand = kwargs['left_hand']
    rT2 = kwargs['rT2']
    repeat_n = kwargs['repeat_n']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    zoom_factor = kwargs['zoom_factor']
    obj_type = sf.obj_dic[matrix_method]
    # create helix
    vhobj0 = obj_type()  # type: sf.StokesFlowObj
    node_dof = vhobj0.get_n_unknown()
    # B is the helix pitch per radian.
    B = ph / (2 * np.pi)
    vhgeo0 = SelfRepeat_FatHelix(repeat_n)  # velocity node geo of helix
    if 'dualPotential' in matrix_method:
        vhgeo0.set_check_epsilon(False)
    vhgeo0.set_dof(node_dof)
    dth = 2 * np.pi / nth
    fhgeo0 = vhgeo0.create_deltatheta(dth=dth, radius=rh2, R1=rh11, R2=rh12, B=B, n_c=ch,
                                      epsilon=eh, with_cover=with_cover, factor=hfct,
                                      left_hand=left_hand)  # type: SelfRepeat_FatHelix
    vhobj0.set_data(fhgeo0, vhgeo0, name='helix_0')
    vhobj0.zoom(zoom_factor)
    if with_T_geo:
        # dbg
        OptDB = PETSc.Options()
        factor = OptDB.getReal('dbg_theta_factor', 1.5)
        PETSc.Sys.Print('--------------------> DBG: dbg_theta_factor = %f' % factor)
        theta = np.pi * ch + (rT2 + rh2 * factor) / (rh11 + rh2)
        vhobj0.node_rotation(norm=np.array((0, 0, 1)), theta=theta)
    vhobj0.move(moveh * zoom_factor)
    # Full tails: rotated copies of the template, evenly spaced about z.
    tail_list = uniqueList()
    for i0 in range(n_tail):
        theta = 2 * np.pi / n_tail * i0
        vhobj1 = vhobj0.copy()
        vhobj1.node_rotation(norm=(0, 0, 1), theta=theta, rotation_origin=center.copy())
        vhobj1.set_name('helix_%d' % i0)
        tail_list.append(vhobj1)
    # Extract start / repeating-body / end sub-geometries of every tail.
    tail_start_list = []
    tail_body0_list = []
    tail_end_list = []
    for tobj in tail_list:
        vhgeo0 = tobj.get_u_geo()
        fhgeo0 = tobj.get_f_geo()
        # start cap
        part_obj = obj_type()
        part_ugeo = vhgeo0.get_start_geo()
        part_fgeo = fhgeo0.get_start_geo()
        part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_start')
        tail_start_list.append(part_obj)
        # repeating mid body (wrapped in a SelfRepeatObj)
        part_obj = sf.SelfRepeatObj()
        part_ugeo = vhgeo0.get_body_mid_geo()
        part_fgeo = fhgeo0.get_body_mid_geo()
        part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_body0')
        tail_body0_list.append(part_obj)
        # end cap
        part_obj = obj_type()
        part_ugeo = vhgeo0.get_end_geo()
        part_fgeo = fhgeo0.get_end_geo()
        part_obj.set_data(part_fgeo, part_ugeo, name='helix_0_end')
        tail_end_list.append(part_obj)
    return tail_list, tail_start_list, tail_body0_list, tail_end_list
def create_ecoli_tail_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
    """Create a combined tail object, orient it by (theta, phi), spin it about
    its own axis by psi_tail, and move it to now_center.
    """
    parts = create_ecoli_tail(np.zeros(3), **problem_kwargs)
    tail_obj = sf.StokesFlowObj()
    tail_obj.set_name('tail_obj')
    tail_obj.combine(parts)
    # Tilt, then azimuthal rotation, then spin about the tail's own norm.
    tail_obj.node_rotation(np.array((0, 1, 0)), theta)
    tail_obj.node_rotation(np.array((0, 0, 1)), phi)
    tail_obj.node_rotation(tail_obj.get_u_geo().get_geo_norm(), psi_tail)
    tail_obj.move(now_center)
    return tail_obj
def get_tail_nodes_split_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
    """Return the oriented tail's velocity nodes split into two interleaved
    halves (odd chunks, even chunks) of the 2*n_tail node groups.
    """
    # create_ecoli_tail_at performs exactly the combine/rotate/move sequence
    # this function previously duplicated inline.
    tail_obj = create_ecoli_tail_at(theta, phi, psi_tail, now_center=now_center,
                                    **problem_kwargs)
    n_tail = problem_kwargs['n_tail']
    chunks = np.split(tail_obj.get_u_nodes(), 2 * n_tail)
    odd_nodes = np.vstack(chunks[1::2])
    even_nodes = np.vstack(chunks[0::2])
    return odd_nodes, even_nodes
def createEcoli_ellipse(name='...', **kwargs):
    """Create an ecoli model: ellipsoidal head plus helical tail(s).

    Returns (head_obj, tail_list).
    """
    ch = kwargs['ch']
    ph = kwargs['ph']
    ds = kwargs['ds']
    rs1 = kwargs['rs1']
    rs2 = kwargs['rs2']
    es = kwargs['es']
    # sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
    zoom_factor = kwargs.get('zoom_factor', 1)
    dist_hs = kwargs['dist_hs']
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']

    helix_len = ph * ch  # length of helix
    head_shift = 0.5 * (dist_hs - 2 * rs1 + helix_len) + rs1
    tail_shift = 0.5 * (dist_hs + 2 * rs1 - helix_len) + helix_len / 2
    moves = np.array((0, 0, head_shift)) + center   # move distance of sphere
    moveh = np.array((0, 0, -tail_shift)) + center  # move distance of helix

    # create tail
    tail_list = create_ecoli_tail(moveh, **kwargs)

    # create head: velocity geo plus a slightly zoomed force geo.
    head_ugeo = ellipse_base_geo()
    head_ugeo.create_delta(ds, rs1, rs2)
    head_ugeo.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
    head_fgeo = head_ugeo.copy()
    head_fgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
    head_obj = sf.obj_dic[matrix_method]()
    head_obj.set_data(head_fgeo, head_ugeo, name='sphere_0')
    head_obj.zoom(zoom_factor)
    head_obj.move(moves * zoom_factor)
    return head_obj, tail_list
def createEcoli_2tails(name='...', **kwargs):
    """Create an ecoli with an ellipsoidal head and one tail at each end
    (right-handed below, left-handed above).

    Returns (head_obj, tail_list1, tail_list2).
    """
    ch = kwargs['ch']
    ph = kwargs['ph']
    ds = kwargs['ds']
    rs1 = kwargs['rs1']
    rs2 = kwargs['rs2']
    es = kwargs['es']
    # sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
    zoom_factor = kwargs.get('zoom_factor', 1)
    dist_hs = kwargs['dist_hs']
    # NOTE(review): 'center' is read but not used below; the lookup is kept so
    # a missing key still fails here, as before.
    center = kwargs['center']
    matrix_method = kwargs['matrix_method']
    lh = ph * ch  # length of helix
    objtype = sf.obj_dic[matrix_method]

    # Two mirrored tails displaced to opposite ends of the head.
    movez = np.array((0, 0, rs1 + dist_hs + lh / 2))
    tail_kwargs = kwargs.copy()
    tail_kwargs['left_hand'] = False
    tail_list1 = create_ecoli_tail(-movez, **tail_kwargs)
    tail_kwargs['left_hand'] = True
    tail_list2 = create_ecoli_tail(movez, **tail_kwargs)

    # Head geometries: velocity geo and a slightly zoomed force geo.
    head_ugeo = ellipse_base_geo()
    head_ugeo.create_delta(ds, rs1, rs2)
    head_ugeo.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2)
    head_fgeo = head_ugeo.copy()
    head_fgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
    head_obj = objtype()
    head_obj.set_data(head_fgeo, head_ugeo, name='sphere_0')
    head_obj.zoom(zoom_factor)
    return head_obj, tail_list1, tail_list2
def createEcoliComp_ellipse(name='...', **kwargs):
    """Assemble head and tails into a force-free composite and apply the
    requested rigid rotation about the body center.
    """
    head_obj, tail_list = createEcoli_ellipse(name=name, **kwargs)
    head_ugeo = head_obj.get_u_geo()
    center = kwargs['center']
    rel_Us = kwargs['rel_Us']
    rel_Uh = kwargs['rel_Uh']
    ecoli_comp = sf.ForceFreeComposite(center=center.copy(),
                                       norm=head_ugeo.get_geo_norm().copy(),
                                       name=name)
    ecoli_comp.add_obj(head_obj, rel_U=rel_Us)
    for tail in tail_list:
        ecoli_comp.add_obj(tail, rel_U=rel_Uh)
    # Final rigid rotation; rot_theta is given in units of pi.
    ecoli_comp.node_rotation(norm=kwargs['rot_norm'].copy(),
                             theta=kwargs['rot_theta'] * np.pi,
                             rotation_origin=center.copy())
    return ecoli_comp
def createEcoli_tunnel(**kwargs):
    """Create an ecoli with a capsule head, helical tail(s) and a T-shaped
    tunnel piece between head and tail.

    Returns
    -------
    (vsobj, tail_list, vTobj): the head object, the list of tail objects and
    the T-shape object.  All three are finally rotated about z by
    (pi/4 - ch*pi) around ``center``.
    """
    ch = kwargs['ch']
    rh1 = kwargs['rh1']
    rh2 = kwargs['rh2']
    ph = kwargs['ph']
    ds = kwargs['ds']
    rs1 = kwargs['rs1']
    rs2 = kwargs['rs2']
    ls = kwargs['ls']
    es = kwargs['es']
    # sphere_rotation = kwargs['sphere_rotation'] if 'sphere_rotation' in kwargs.keys() else 0
    zoom_factor = kwargs['zoom_factor']
    dist_hs = kwargs['dist_hs']
    center = kwargs['center']
    rT1 = kwargs['rT1']
    rT2 = kwargs['rT2']
    ntT = kwargs['ntT']
    eT = kwargs['eT']
    Tfct = kwargs['Tfct']
    matrix_method = kwargs['matrix_method']
    lh = ph * ch  # length of helix
    # Axial offsets placing head above and helix below, separated by dist_hs.
    movesz = 0.5 * (dist_hs - ls + lh) + ls / 2
    movehz = -1 * (0.5 * (dist_hs + ls - lh) + lh / 2)
    # movesz = (ls + dist_hs) / 2
    # movehz = (lh + dist_hs) / 2
    moves = np.array((0, 0, movesz)) + center  # move distance of sphere
    moveh = np.array((rT1 - rh1, 0, movehz)) + center  # move distance of helix
    lT = (rT1 + rh2) * 2
    objtype = sf.obj_dic[matrix_method]
    # create helix
    tail_list = create_ecoli_tail(moveh, **kwargs)
    # create head: capsule velocity geo; force geo is zoomed radially out and
    # squeezed along z.
    vsobj = objtype()
    node_dof = vsobj.get_n_unknown()
    vsgeo = create_capsule(rs1, rs2, ls, ds, node_dof)
    fsgeo = vsgeo.copy()  # force node geo of sphere
    fsgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
    fsgeo.node_zoom_z(1 - ds / (0.5 * (rs1 + rs2)) * es)
    vsobj.set_data(fsgeo, vsgeo, name='sphere_0')
    vsobj.zoom(zoom_factor)
    vsobj.move(moves * zoom_factor)
    # create T shape: a short tunnel at the top of the helix, laid sideways.
    dtT = 2 * np.pi / ntT
    vTobj = objtype()
    node_dof = vTobj.get_n_unknown()
    # # dbg
    # OptDB = PETSc.Options( )
    # factor = OptDB.getReal('dbg_move_factor', 1)
    # PETSc.Sys.Print('--------------------> DBG: dbg_move_factor = %f' % factor)
    # moveT = np.array((0, 0, moveh[-1] + lh / 2 + rh2 * factor))
    moveT = np.array((0, 0, movehz + lh / 2)) + center
    vTgeo = tunnel_geo()
    if 'dualPotential' in matrix_method:
        vTgeo.set_check_epsilon(False)
    vTgeo.set_dof(node_dof)
    fTgeo = vTgeo.create_deltatheta(dth=dtT, radius=rT2, factor=Tfct, length=lT, epsilon=eT,
                                    with_cover=1)
    vTobj.set_data(fTgeo, vTgeo, name='T_shape_0')
    theta = -np.pi / 2
    vTobj.node_rotation(norm=np.array((0, 1, 0)), theta=theta)
    vTobj.zoom(zoom_factor)
    vTobj.move(moveT * zoom_factor)
    # Common final rotation of all parts about the z axis through `center`.
    theta = np.pi / 4 - ch * np.pi
    vsobj.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
    for ti in tail_list:
        ti.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
    vTobj.node_rotation(norm=np.array((0, 0, 1)), theta=theta, rotation_origin=center)
    return vsobj, tail_list, vTobj
def createEcoliComp_tunnel(name='...', **kwargs):
    """Assemble the tunnel-head ecoli into a force-free composite; the
    T-shaped piece is included only when with_T_geo is truthy.
    """
    with_T_geo = kwargs.get('with_T_geo', 0)
    center = kwargs['center']
    rel_Us = kwargs['rel_Us']
    rel_Uh = kwargs['rel_Uh']
    if not with_T_geo:
        # No junction geometry: collapse rT1 onto the helix radius rh1.
        kwargs['rT1'] = kwargs['rh1']
    head_obj, tail_list, T_obj = createEcoli_tunnel(**kwargs)
    ecoli_comp = sf.ForceFreeComposite(center, norm=head_obj.get_u_geo().get_geo_norm(),
                                       name=name)
    ecoli_comp.add_obj(head_obj, rel_U=rel_Us)
    for tail in tail_list:
        ecoli_comp.add_obj(tail, rel_U=rel_Uh)
    if with_T_geo:
        ecoli_comp.add_obj(T_obj, rel_U=rel_Uh)
    return ecoli_comp
def create_ecoli_2part(**problem_kwargs):
    """Create an ecoli composite with exactly two parts: a head and one
    combined tail object."""
    rel_Us = problem_kwargs['rel_Us']
    rel_Uh = problem_kwargs['rel_Uh']
    update_order = problem_kwargs.get('update_order', 1)
    update_fun = problem_kwargs.get('update_fun', Adams_Bashforth_Methods)
    assert not problem_kwargs['with_T_geo'], \
        'currently, do not support with_T_geo for this kind of ecoli. '
    head_obj, tail_obj_list = createEcoli_ellipse(name='ecoli0', **problem_kwargs)
    head_obj.set_name('head_obj')
    tail_obj = sf.StokesFlowObj()
    tail_obj.set_name('tail_obj')
    tail_obj.combine(tail_obj_list)
    head_geo = head_obj.get_u_geo()
    # ecoli_comp = sf.ForceFreeComposite(center=head_geo.get_center(), norm=head_geo.get_geo_norm(), name='ecoli_0')
    ecoli_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=head_geo.get_geo_norm(),
                                       name='ecoli_0')
    ecoli_comp.add_obj(obj=head_obj, rel_U=rel_Us)
    ecoli_comp.add_obj(obj=tail_obj, rel_U=rel_Uh)
    ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                               update_fun=update_fun, update_order=update_order)
    return ecoli_comp
def create_rotlets_tail_2part(rotlet_strength=0, **problem_kwargs):
    """Create a swimmer whose head is a point rotlet (infinitely small head
    limit) driving helical tail(s); the composite carries the opposite torque
    so the swimmer is torque balanced."""
    ch = problem_kwargs['ch']
    ph = problem_kwargs['ph']
    dist_hs = problem_kwargs['dist_hs']
    lh = ph * ch  # length of helix
    assert not problem_kwargs['with_T_geo'], \
        'currently, do not support with_T_geo for this kind of ecoli. '
    tail_list = create_ecoli_tail(np.zeros(3), **problem_kwargs)
    combined_tail = sf.StokesFlowObj()
    combined_tail.combine(tail_list)
    tail_obj = sf.FundSoltObj()
    tail_obj.set_data(combined_tail.get_u_geo(), combined_tail.get_f_geo(),
                      name='rotlets_tail_obj')
    # Place the rotlet where the head would sit, spinning about the tail axis.
    location = np.array((0, 0, lh / 2 + dist_hs))
    tnorm = combined_tail.get_u_geo().get_geo_norm()
    torque = tnorm * rotlet_strength
    tail_obj.add_point_force(location=location, force=torque,
                             StokesletsHandle=light_rotlets_matrix_3d)
    givenT = np.hstack((np.zeros(3), -1 * torque))
    ecoli_comp = sf.GivenForceComposite(center=np.zeros(3), norm=tnorm,
                                        name='rotlets_tail_comp', givenF=givenT)
    ecoli_comp.add_obj(obj=tail_obj, rel_U=np.zeros(6))
    update_order = problem_kwargs.get('update_order', 1)
    update_fun = problem_kwargs.get('update_fun', Adams_Bashforth_Methods)
    ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                               update_fun=update_fun, update_order=update_order)
    return ecoli_comp
def create_ecoli_2part_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
    """Create a two-part ecoli, orient it by (theta, phi), spin the tail by
    psi_tail about its own axis, and move the whole composite to now_center.
    """
    ecoli_comp = create_ecoli_2part(**problem_kwargs)
    ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
    ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
    the_tail = ecoli_comp.get_obj_list()[1]
    the_tail.node_rotation(the_tail.get_u_geo().get_geo_norm(), psi_tail)
    ecoli_comp.move(now_center)
    return ecoli_comp
def get_ecoli_nodes_split_at(theta, phi, psi_tail, now_center=np.zeros(3), **problem_kwargs):
    """Return (tail odd chunks, tail even chunks, head nodes) of an oriented
    two-part ecoli.
    """
    # create_ecoli_2part_at performs exactly the rotate/spin/move sequence
    # this function previously duplicated inline.
    ecoli_comp = create_ecoli_2part_at(theta, phi, psi_tail, now_center=now_center,
                                       **problem_kwargs)
    tail_obj = ecoli_comp.get_obj_list()[1]
    n_tail = problem_kwargs['n_tail']
    chunks = np.split(tail_obj.get_u_nodes(), 2 * n_tail)
    odd_nodes = np.vstack(chunks[1::2])
    even_nodes = np.vstack(chunks[0::2])
    head_nodes = ecoli_comp.get_obj_list()[0].get_u_nodes()
    return odd_nodes, even_nodes, head_nodes
def create_ecoli_dualTail(**problem_kwargs):
    """Create a three-part swimmer (head, upper tail, lower tail) where the
    two tails have opposite handedness and opposite relative velocities."""
    rel_Us = problem_kwargs['rel_Us']
    rel_Uh = problem_kwargs['rel_Uh']
    update_order = problem_kwargs.get('update_order', 1)
    update_fun = problem_kwargs.get('update_fun', Adams_Bashforth_Methods)
    assert not problem_kwargs['with_T_geo'], \
        'currently, do not support with_T_geo for this kind of ecoli. '
    head_obj, tail_parts1, tail_parts2 = createEcoli_2tails(name='ecoli0', **problem_kwargs)
    head_obj.set_name('head_obj')
    tail_obj1 = sf.StokesFlowObj()
    tail_obj1.set_name('tail_obj1')
    tail_obj1.combine(tail_parts1)
    tail_obj2 = sf.StokesFlowObj()
    tail_obj2.set_name('tail_obj2')
    tail_obj2.combine(tail_parts2)
    ecoli_comp = sf.ForceFreeComposite(center=np.zeros(3),
                                       norm=head_obj.get_u_geo().get_geo_norm(),
                                       name='ecoli_0')
    ecoli_comp.add_obj(obj=head_obj, rel_U=rel_Us)
    ecoli_comp.add_obj(obj=tail_obj1, rel_U=rel_Uh)
    # The second tail counter-rotates.
    ecoli_comp.add_obj(obj=tail_obj2, rel_U=-rel_Uh)
    ecoli_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                               update_fun=update_fun, update_order=update_order)
    return ecoli_comp
def create_ecoli_dualTail_at(theta, phi, psi_tail1, psi_tail2, center=np.zeros(3),
                             **problem_kwargs):
    """Place a dual-tail ecoli at a given orientation and position.

    NOTE(review): deliberately disabled -- the ``assert 1 == 2`` below raises
    AssertionError on every call and the orientation logic is commented out.
    Confirm intent before re-enabling.
    """
    assert 1 == 2
    ecoli_comp = create_ecoli_dualTail(**problem_kwargs)
    # ecoli_comp.node_rotation(np.array((0, 1, 0)), theta)
    # ecoli_comp.node_rotation(np.array((0, 0, 1)), phi)
    # tail_obj1 = ecoli_comp.get_obj_list()[1]
    # tail_obj1.node_rotation(tail_obj1.get_u_geo().get_geo_norm(), psi_tail1)
    # tail_obj2 = ecoli_comp.get_obj_list()[2]
    # tail_obj2.node_rotation(tail_obj2.get_u_geo().get_geo_norm(), psi_tail2)
    return ecoli_comp
def create_sphere(namehandle='sphereObj', **kwargs):
    """Create one sphere object per (coordinate, velocity) pair in
    sphere_coord / sphere_velocity."""
    matrix_method = kwargs['matrix_method']
    rs = kwargs['rs']
    sphere_velocity = kwargs['sphere_velocity']
    ds = kwargs['ds']
    es = kwargs['es']
    sphere_coord = kwargs['sphere_coord']

    # Template object; all spheres are translated copies of it.
    template = sf.obj_dic[matrix_method]()
    surf_geo = sphere_geo()  # force geo
    surf_geo.set_dof(template.get_n_unknown())
    surf_geo.create_delta(ds, rs)
    surf_geo.set_rigid_velocity([0, 0, 0, 0, 0, 0])
    zoomed_geo = surf_geo.copy()
    if 'pf' in matrix_method:
        # For pf-type methods the first (zoomed) geo is inflated by ds*es.
        zoomed_geo.node_zoom((rs + ds * es) / rs)
    template.set_data(zoomed_geo, surf_geo)

    obj_list = []
    for i0, (t_coord, t_velocity) in enumerate(zip(sphere_coord, sphere_velocity)):
        sphere_i = template.copy()
        sphere_i.set_name('%s_%d' % (namehandle, i0))
        sphere_i.move(t_coord)
        sphere_i.get_u_geo().set_rigid_velocity(t_velocity)
        obj_list.append(sphere_i)
    return obj_list
def create_one_ellipse(namehandle='sphereObj', **kwargs):
    """Create a single ellipsoid object at sphere_coord moving with the given
    rigid-body velocity."""
    matrix_method = kwargs['matrix_method']
    rs1 = kwargs['rs1']
    rs2 = kwargs['rs2']
    sphere_velocity = kwargs['sphere_velocity']
    ds = kwargs['ds']
    es = kwargs['es']
    sphere_coord = kwargs['sphere_coord']

    ellipse_obj = sf.obj_dic[matrix_method]()  # type: sf.StokesFlowObj
    ugeo = ellipse_base_geo()  # force geo
    ugeo.set_dof(ellipse_obj.get_n_unknown())
    ugeo.create_delta(ds, rs1, rs2)
    ugeo.set_rigid_velocity(sphere_velocity)
    fgeo = ugeo.copy()
    if 'pf' in matrix_method:
        fgeo.node_zoom(1 + ds / (0.5 * (rs1 + rs2)) * es)
    ellipse_obj.set_data(fgeo, ugeo, name=namehandle)
    ellipse_obj.move(sphere_coord)
    return ellipse_obj
def create_move_single_sphere(namehandle='sphereObj', **kwargs):
    """Create the first sphere from create_sphere and shift it along z by
    kwargs['movez']; returns a one-element tuple."""
    movez = kwargs['movez']
    sphere_obj = create_sphere(namehandle, **kwargs)[0]
    sphere_obj.move(np.array((0, 0, movez)))
    return (sphere_obj,)
def create_rod(namehandle='rod_obj', **problem_kwargs):
    """Create a rod (tunnel geometry) wrapped in a given-force composite.

    Returns a one-element tuple containing the composite.
    """
    rRod = problem_kwargs['rRod']
    lRod = problem_kwargs['lRod']
    ntRod = problem_kwargs['ntRod']
    eRod = problem_kwargs['eRod']
    Rodfct = problem_kwargs['Rodfct']
    RodThe = problem_kwargs['RodThe']
    RodPhi = problem_kwargs['RodPhi']
    rel_URod = problem_kwargs['rel_URod']
    RodCenter = problem_kwargs['RodCenter']
    zoom_factor = problem_kwargs['zoom_factor']
    givenF = problem_kwargs['givenF']
    matrix_method = problem_kwargs['matrix_method']

    tube_geo = tunnel_geo()
    tube_geo.create_deltatheta(dth=2 * np.pi / ntRod, radius=rRod, length=lRod,
                               epsilon=eRod, with_cover=1, factor=Rodfct,
                               left_hand=False)
    # first displace the rod above the surface, rotate to horizon.
    tube_geo.move(displacement=RodCenter)
    tube_geo.node_zoom(factor=zoom_factor, zoom_origin=RodCenter)
    tube_geo.node_rotation(norm=np.array((0, 1, 0)), theta=-np.pi / 2,
                           rotation_origin=RodCenter)
    # then rotate within a plane tilted by RodPhi, through angle RodThe.
    tilt_norm = np.array((0, np.sin(RodPhi), np.cos(RodPhi)))
    tube_geo.node_rotation(norm=tilt_norm, theta=-RodThe, rotation_origin=RodCenter)

    rod_obj = sf.obj_dic[matrix_method]()
    rod_obj.set_data(f_geo=tube_geo, u_geo=tube_geo, name=namehandle + '_obj_0')
    rod_comp = sf.GivenForceComposite(center=RodCenter, name=namehandle + '_0',
                                      givenF=givenF.copy())
    rod_comp.add_obj(obj=rod_obj, rel_U=rel_URod)
    return (rod_comp,)
def create_infHelix(namehandle='infhelix', normalize=False, **problem_kwargs):
    """Create n_tail infinite-helix objects evenly spaced in starting phase.

    When ``normalize`` is set, the radii and pitch are pre-multiplied by
    zoom_factor before the geometries are built.
    """
    n_tail = problem_kwargs['n_tail']
    eh = problem_kwargs['eh']
    ch = problem_kwargs['ch']
    rh1 = problem_kwargs['rh1']
    rh2 = problem_kwargs['rh2']
    ph = problem_kwargs['ph']
    nth = problem_kwargs['nth']
    zoom_factor = problem_kwargs['zoom_factor']
    if normalize:
        rh2, ph, rh1 = rh2 * zoom_factor, ph * zoom_factor, rh1 * zoom_factor

    helix_list = []
    start_phases = np.linspace(0, 2 * np.pi, n_tail, endpoint=False)
    for i0, theta0 in enumerate(start_phases):
        ugeo = infHelix()
        ugeo.create_n(rh1, rh2, ph, ch, nth, theta0=theta0)
        fgeo = ugeo.create_fgeo(epsilon=eh)
        helix_obj = sf.StokesFlowObj()
        helix_obj.set_data(f_geo=fgeo, u_geo=ugeo, name=namehandle + '%02d' % i0)
        helix_list.append(helix_obj)
    return helix_list
def create_helicoid_list(namehandle='helicoid', **problem_kwargs):
    """Create the list of disk objects forming a helicoid.

    For each of the ndsk_each azimuthal stations, three copies of a template
    disk are appended: the in-plane disk and two copies rotated pi/2 about
    the x and y axes respectively.

    NOTE(review): the copy/rotate interleaving below is order sensitive --
    tgeo22 and tgeo23 are both copied from tgeo21 BEFORE their own rotations
    are applied; do not reorder these statements.
    """
    r1 = problem_kwargs['helicoid_r1']
    r2 = problem_kwargs['helicoid_r2']
    ds = problem_kwargs['helicoid_ds']
    th_loc = problem_kwargs['helicoid_th_loc']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    matrix_method = problem_kwargs['matrix_method']
    assert matrix_method in ('rs', 'lg_rs')
    assert ndsk_each == 4
    # Template disk: tilted by th_loc and shifted out to radius r1.
    tgeo = regularizeDisk()
    tgeo.create_ds(ds, r2)
    tgeo.node_rotation(norm=np.array([1, 0, 0]), theta=th_loc)
    tgeo.move(np.array((r1, 0, 0)))
    # tgeo.show_nodes()
    tgeo_list = []
    rot_dth = 2 * np.pi / ndsk_each
    for i0 in range(ndsk_each):
        rot_th = i0 * rot_dth + np.pi / 4
        # rot_th = i0 * rot_dth
        tgeo21 = tgeo.copy()
        tgeo21.node_rotation(norm=np.array([0, 0, 1]), theta=rot_th, rotation_origin=np.zeros(3))
        tgeo22 = tgeo21.copy()
        tgeo_list.append(tgeo21)
        tgeo22.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
        tgeo23 = tgeo21.copy()
        tgeo_list.append(tgeo22)
        tgeo23.node_rotation(norm=np.array([0, 1, 0]), theta=np.pi / 2, rotation_origin=np.zeros(3))
        tgeo_list.append(tgeo23)
    # tgeo3 = base_geo()
    # tgeo3.combine(tgeo_list)
    # tgeo3.show_nodes(linestyle='')
    # Wrap every geometry in a StokesFlowObj sharing force and velocity geo.
    tobj_list = []
    for i0, tgeo in enumerate(tgeo_list):
        tobj = sf.StokesFlowObj()
        tobj.set_matrix_method(matrix_method)  # the geo is regularizeDisk
        tobj.set_data(f_geo=tgeo, u_geo=tgeo, name=namehandle + '%02d' % i0)
        tobj_list.append(tobj)
    return tobj_list
def create_helicoid_comp(*args, **kwargs):
    """Wrap the disks from create_helicoid_list into one force-free composite."""
    update_order = kwargs.get('update_order', 1)
    update_fun = kwargs.get('update_fun', Adams_Bashforth_Methods)
    disk_obj_list = create_helicoid_list(*args, **kwargs)
    helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
                                          name='helicoid_comp')
    for disk_obj in disk_obj_list:
        # All disks are rigidly attached: zero relative velocity.
        helicoid_comp.add_obj(obj=disk_obj, rel_U=np.zeros(6))
    helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                                  update_fun=update_fun, update_order=update_order)
    return helicoid_comp
def obj2helicoid_list(tobj0, **problem_kwargs):
    """Replicate tobj0 into a helicoid arrangement: at each of the ndsk_each
    azimuthal stations, place three copies (in-plane plus pi/2 rotations about
    x and y).  The input object itself is not modified.
    """
    helicoid_r = problem_kwargs['helicoid_r']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    assert ndsk_each == 4
    base_obj = tobj0.copy()
    base_obj.move(np.array((helicoid_r, 0, 0)))
    namehandle = base_obj.get_name()
    rot_dth = 2 * np.pi / ndsk_each
    tobj_list = []
    for i0 in range(ndsk_each):
        rot_th = i0 * rot_dth + np.pi / 4
        # rot_th = i0 * rot_dth
        copy_a = base_obj.copy()
        copy_a.set_name('%s_%02d_%01d' % (namehandle, i0, 1))
        copy_a.node_rotation(norm=np.array([0, 0, 1]), theta=rot_th,
                             rotation_origin=np.zeros(3))
        tobj_list.append(copy_a)
        copy_b = copy_a.copy()
        copy_b.set_name('%s_%02d_%01d' % (namehandle, i0, 2))
        copy_b.node_rotation(norm=np.array([1, 0, 0]), theta=np.pi / 2,
                             rotation_origin=np.zeros(3))
        tobj_list.append(copy_b)
        copy_c = copy_a.copy()
        copy_c.set_name('%s_%02d_%01d' % (namehandle, i0, 3))
        copy_c.node_rotation(norm=np.array([0, 1, 0]), theta=np.pi / 2,
                             rotation_origin=np.zeros(3))
        tobj_list.append(copy_c)
    return tobj_list
def obj2helicoid_list_v2(tobj0, **problem_kwargs):
    """Expand one object into the 4-fold helicoid arrangement (v2).

    ``tobj0`` (which must be centered at the origin) is moved in place to
    (r/sqrt(2), r/sqrt(2), 0); two extra copies are rotated +-pi/2 about x,
    and the resulting triplet is replicated at the four azimuthal positions
    (offset by ``helicoid_th0``).

    Note: tobj0 itself is moved in place.
    """
    helicoid_r = problem_kwargs['helicoid_r']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    assert ndsk_each == 4
    # dict.get() replaces the verbose "key in kwargs.keys()" conditional.
    helicoid_th0 = problem_kwargs.get('helicoid_th0', 0)
    assert np.isclose(np.linalg.norm(tobj0.get_u_geo().get_center()), 0)
    namehandle = tobj0.get_name()
    t1 = helicoid_r / np.sqrt(2)
    tobj0.move((t1, t1, 0))
    tobj1 = tobj0.copy()
    tobj1.node_rotation(np.array((1, 0, 0)), np.pi / 2, rotation_origin=np.zeros(3))
    tobj2 = tobj0.copy()
    tobj2.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    tobj_list = []
    rot_dth = 2 * np.pi / ndsk_each
    for i0 in range(ndsk_each):
        rot_th = i0 * rot_dth + helicoid_th0
        for i1, tobji in enumerate((tobj0, tobj1, tobj2)):
            tobji_i0 = tobji.copy()
            tobji_i0.set_name('%s_%02d_%01d' % (namehandle, i0, i1))
            tobji_i0.node_rotation(np.array((0, 0, 1)), rot_th, rotation_origin=np.zeros(3))
            tobj_list.append(tobji_i0)
    return tobj_list
def obj2helicoid_list_v3(tobj, **problem_kwargs):
    """Expand one object into the 4-fold helicoid arrangement (v3).

    ``tobj`` (which must be centered at the origin) is shifted radially by
    ``helicoid_r``; one copy is rotated -rot_dth/2 about z, two more are
    tilted -pi/2 about x then +-rot_dth/2 about y, and the triplet is
    replicated at the four azimuthal positions (offset by ``helicoid_th0``).

    Note: tobj itself is moved in place.
    """
    helicoid_r = problem_kwargs['helicoid_r']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    assert ndsk_each == 4
    # dict.get() replaces the verbose "key in kwargs.keys()" conditional.
    helicoid_th0 = problem_kwargs.get('helicoid_th0', 0)
    assert np.isclose(np.linalg.norm(tobj.get_u_geo().get_center()), 0)
    namehandle = tobj.get_name()
    rot_dth = 2 * np.pi / ndsk_each
    tobj.move((helicoid_r, 0, 0))
    tobj0 = tobj.copy()
    tobj0.node_rotation(np.array((0, 0, 1)), -rot_dth / 2, rotation_origin=np.zeros(3))
    tobj1 = tobj.copy()
    tobj1.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    tobj1.node_rotation(np.array((0, 1, 0)), rot_dth / 2, rotation_origin=np.zeros(3))
    tobj2 = tobj.copy()
    tobj2.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    tobj2.node_rotation(np.array((0, 1, 0)), -rot_dth / 2, rotation_origin=np.zeros(3))
    tobj_list = []
    for i0 in range(ndsk_each):
        rot_th = i0 * rot_dth + helicoid_th0
        for i1, tobji in enumerate((tobj0, tobj1, tobj2)):
            tobji_i0 = tobji.copy()
            tobji_i0.set_name('%s_%02d_%01d' % (namehandle, i0, i1))
            tobji_i0.node_rotation(np.array((0, 0, 1)), rot_th, rotation_origin=np.zeros(3))
            tobj_list.append(tobji_i0)
    return tobj_list
def obj2helicoid_list_selfRotate(tobj, **problem_kwargs):
    """Build the three mutually rotated copies used by the self-rotating
    helicoid: one rotated -rot_dth/2 about z, and two tilted -pi/2 about x
    followed by +-rot_dth/2 about y.

    Note: tobj (which must be centered at the origin) is moved radially
    in place before copying.
    """
    helicoid_r = problem_kwargs['helicoid_r']
    ndsk_each = problem_kwargs['helicoid_ndsk_each']
    assert ndsk_each == 4
    assert np.isclose(np.linalg.norm(tobj.get_u_geo().get_center()), 0)
    half_dth = (2 * np.pi / ndsk_each) / 2
    tobj.move((helicoid_r, 0, 0))
    obj_a = tobj.copy()
    obj_a.node_rotation(np.array((0, 0, 1)), -half_dth, rotation_origin=np.zeros(3))
    obj_b = tobj.copy()
    obj_b.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    obj_b.node_rotation(np.array((0, 1, 0)), half_dth, rotation_origin=np.zeros(3))
    obj_c = tobj.copy()
    obj_c.node_rotation(np.array((1, 0, 0)), -np.pi / 2, rotation_origin=np.zeros(3))
    obj_c.node_rotation(np.array((0, 1, 0)), -half_dth, rotation_origin=np.zeros(3))
    return [obj_a, obj_b, obj_c]
def obj2helicoid_comp(tobj0, **kwargs):
    """Wrap the helicoid object list (v3 layout) in a force-free composite.

    Keyword Args:
        update_order: integration order for set_update_para (default 1).
        update_fun: integration method (default Adams_Bashforth_Methods).

    Returns:
        The configured sf.ForceFreeComposite.
    """
    # dict.get() replaces the verbose "key in kwargs.keys()" conditionals.
    update_order = kwargs.get('update_order', 1)
    update_fun = kwargs.get('update_fun', Adams_Bashforth_Methods)
    helicoid_list = obj2helicoid_list_v3(tobj0, **kwargs)
    helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
                                          name='helicoid_comp')
    for tobj in helicoid_list:
        helicoid_comp.add_obj(obj=tobj, rel_U=np.zeros(6))
    helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                                  update_fun=update_fun, update_order=update_order)
    return helicoid_comp
def obj2helicoid_comp_selfRotate(tobj0, **kwargs):
    """Wrap the self-rotating helicoid object list in a force-free composite.

    Keyword Args:
        update_order: integration order for set_update_para (default 1).
        update_fun: integration method (default Adams_Bashforth_Methods).

    Returns:
        The configured sf.ForceFreeComposite.
    """
    # dict.get() replaces the verbose "key in kwargs.keys()" conditionals.
    update_order = kwargs.get('update_order', 1)
    update_fun = kwargs.get('update_fun', Adams_Bashforth_Methods)
    helicoid_list = obj2helicoid_list_selfRotate(tobj0, **kwargs)
    helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
                                          name='helicoid_comp')
    for tobj in helicoid_list:
        helicoid_comp.add_obj(obj=tobj, rel_U=np.zeros(6))
    helicoid_comp.set_update_para(fix_x=False, fix_y=False, fix_z=False,
                                  update_fun=update_fun, update_order=update_order)
    return helicoid_comp
def creat_helicoid_dumb(**problem_kwargs):
    """Create a force-free helicoid composite built from a dumbbell object.

    Two identical delta-distributed spheres are placed at +-dumb_d/2 along z,
    combined into one geometry, tilted by dumb_theta about x, and expanded
    into the helicoid composite via obj2helicoid_comp.
    """
    dumb_d = problem_kwargs['dumb_d']
    dumb_theta = problem_kwargs['dumb_theta']
    ds = problem_kwargs['ds']
    rs = problem_kwargs['rs']
    sphere_a = sphere_geo()
    sphere_a.create_delta(ds, rs)
    sphere_b = sphere_a.copy()
    sphere_a.move(np.array((0, 0, dumb_d / 2)))
    sphere_b.move(np.array((0, 0, -dumb_d / 2)))
    dumb_geo = base_geo()
    dumb_geo.combine([sphere_a, sphere_b], origin=np.zeros(3), geo_norm=np.array((0, 0, 1)))
    dumb_geo.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
    tobj = sf.StokesFlowObj()
    tobj.set_data(dumb_geo, dumb_geo, 'helicoid_dumb')
    return obj2helicoid_comp(tobj, **problem_kwargs)
def creat_helicoid_dumb_selfRotate(**problem_kwargs):
    """Create the self-rotating helicoid composite from a dumbbell object.

    The velocity geometry uses the raw spheres; the force geometry uses
    copies zoomed by (1 + ds * es / rs).  Both dumbbells are assembled at
    +-dumb_d/2 along z, tilted by dumb_theta about x, and the resulting
    object is expanded via obj2helicoid_comp_selfRotate.
    """
    matrix_method = problem_kwargs['matrix_method']
    dumb_d = problem_kwargs['dumb_d']
    dumb_theta = problem_kwargs['dumb_theta']
    ds = problem_kwargs['ds']
    rs = problem_kwargs['rs']
    es = problem_kwargs['es']
    # velocity-geometry sphere and its zoomed force-geometry counterpart
    sphere_u0 = sphere_geo()
    sphere_u0.create_delta(ds, rs)
    sphere_f0 = sphere_u0.copy()
    sphere_f0.node_zoom(1 + ds * es / rs)
    sphere_u1 = sphere_u0.copy()
    sphere_f1 = sphere_f0.copy()
    sphere_u0.move(np.array((0, 0, dumb_d / 2)))
    sphere_u1.move(np.array((0, 0, -dumb_d / 2)))
    sphere_f0.move(np.array((0, 0, dumb_d / 2)))
    sphere_f1.move(np.array((0, 0, -dumb_d / 2)))
    dumb_geo = base_geo()
    dumb_geo.combine([sphere_u0, sphere_u1], origin=np.zeros(3), geo_norm=np.array((0, 0, 1)))
    dumb_geo.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
    dumb_geof = base_geo()
    dumb_geof.combine([sphere_f0, sphere_f1], origin=np.zeros(3), geo_norm=np.array((0, 0, 1)))
    dumb_geof.node_rotation(norm=np.array((1, 0, 0)), theta=dumb_theta)
    tobj = sf.obj_dic[matrix_method]()
    tobj.set_data(dumb_geof, dumb_geo, name='helicoid_dumb')
    return obj2helicoid_comp_selfRotate(tobj, **problem_kwargs)
| 0.002428 |
import uuid
from django import forms
from django.utils.translation import ugettext as _
from django.utils.text import slugify
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div, Field, HTML, Layout, Fieldset
from crispy_forms.bootstrap import PrependedText
from django.template import RequestContext
from .models import Event
class EventForm(forms.ModelForm):
    """ModelForm for Event with a crispy-forms layout.

    Captures the event summary/category/tags/description, contact details,
    start/end dates and a geolocatable address (lat/lon are hidden fields
    filled by the map widget).  The requesting user is injected at
    construction time and recorded as ``creation_user`` on save.
    """
    def __init__(self, user, *args, **kwargs):
        # Remember who is creating/editing the event; used by save().
        self.user = user
        super(EventForm, self).__init__(*args, **kwargs)
    @property
    def helper(self):
        """Build the crispy-forms FormHelper describing the page layout."""
        helper = FormHelper()
        helper.render_unmentioned_fields = True
        # The enclosing template renders its own <form> tag.
        helper.form_tag = False
        helper.layout = Layout(
            Fieldset(
                _(u'Event'),
                Div(Div(Field('summary', placeholder=_(u'Event summary')), css_class='col-sm-6'),
                    Div(Field('category', placeholder=_("Category")), css_class='col-sm-6'),
                    css_class='row'
                ),
                Div(Div(Field('tags', placeholder=_("keyword, keyword 1, key word 2")), css_class="col-sm-12"),
                    css_class="row"
                ),
                Div(Div(Field('description', help_text=_('tada')), css_class="col-sm-12"),
                    css_class="row"
                ),
            ),
            Fieldset(
                _(u'Contact'),
                Div(Div(Field('email', placeholder=_("Contact Email")), css_class="col-sm-6"),
                    Div(Field('web', placeholder=_("Web site URL")), css_class="col-sm-6"),
                    css_class="row"
                ),
            ),
            Fieldset(
                _(u'Date'),
                Div(Div(Field('dtstart', placeholder=_(u'From')), css_class='col-sm-6'),
                    Div(Field('dtend', placeholder=_(u'To')), css_class='col-sm-6'),
                    css_class='row'
                ),
                Div(Div(Field('allday'), css_class='col-sm-12'),
                    css_class='row'
                ),
            ),
            Fieldset(
                _(u'Location'),
                Div(Div(Field('country', placeholder=_("country")), HTML('''{% load i18n %}<p class="help-block"><button id="geosearch-button" type="button" class="btn btn-default">
            {% trans 'Geolocate' %}</button> Try to geolocate from the given address.
            </p>'''), css_class="col-sm-6"),
                Div(Field('address', placeholder=_(
                    '''Street
                    \n
                    ZipCode City''')), css_class="col-sm-6"),
                    css_class="row"
                ),
                Div(Div(HTML('''<div style="height:350px;" id="map"></div><br>'''), css_class="col-sm-12"),
                    css_class="row"),
                Div(Div(Field('lat', placeholder=_("Location latitude")), css_class="col-sm-6"),
                    Div(Field('lon', placeholder=_("Location longitude")), css_class="col-sm-6"),
                    css_class="row"),
            ),
        )
        return helper
    class Meta:
        model = Event
        widgets = {
            'description': forms.Textarea(attrs={'cols': 10, 'rows': 6}),
            'address': forms.Textarea(attrs={'cols': 10, 'rows': 3}),
            'dtstart': forms.DateInput(attrs={'class': 'datepicker'}),
            'dtend': forms.DateInput(attrs={'class': 'datepicker'}),
            # lat/lon are populated client-side by the geolocation map widget
            'lat':forms.HiddenInput(),
            'lon':forms.HiddenInput(),
        }
        #fields = "__all__"
        exclude =['slug', 'pub_status', 'status', 'sequence', 'creation_user']
    class Media:
        js = ["/static/thedirectory/js/map_mini.js"]
    def save(self, commit=True):
        u'''Save the event, deriving a unique slug from the summary.

        If the slug already exists a short uuid suffix is appended; the
        creating user is stamped onto the instance before committing.
        '''
        event = super(EventForm, self).save(commit=False)
        if not event.slug:
            event.slug = slugify(self.cleaned_data.get('summary'))
            try:
                # Collision check: an existing event with this slug forces a
                # uuid-suffixed variant.
                Event.objects.get(slug=event.slug)
                event.slug = "%s-%s" % (event.slug, str(uuid.uuid4())[:5])
            except Event.DoesNotExist:
                pass
        if commit:
            event.creation_user = self.user
            event.save()
            self.save_m2m()
        return event
| 0.005959 |
"""Views handler for ask admin page rendering."""
from django.core.urlresolvers import reverse
import simplejson as json
from django.http import Http404, HttpResponse
from django.template.loader import render_to_string
from django.core.mail.message import EmailMultiAlternatives
from apps.managers.challenge_mgr import challenge_mgr
from apps.widgets.ask_admin.forms import FeedbackForm
def supply(request, page_name):
    """Supply the view_objects dict (a FeedbackForm) for widget rendering."""
    # Both arguments are part of the widget-supply signature but unused here.
    _ = request
    _ = page_name
    feedback_form = FeedbackForm(auto_id="help_%s")
    feedback_form.url = reverse("help_index")
    return {"form": feedback_form}
def send_feedback(request):
    """Email a user's question to the challenge contact address.

    Handles AJAX POSTs of a valid FeedbackForm and returns a JSON success
    payload; every other path raises Http404 (same control flow as before).
    """
    if request.method == "POST":
        form = FeedbackForm(request.POST)
        if form.is_valid():
            # One context dict feeds both the HTML and the plain-text bodies.
            context = {
                "user": request.user,
                "url": form.cleaned_data["url"],
                "question": form.cleaned_data["question"],
            }
            html_message = render_to_string("email/ask_admin.html", context)
            message = render_to_string("email/ask_admin.txt", context)
            challenge = challenge_mgr.get_challenge()
            # Using adapted version from Django source code
            subject = u'[%s] %s asked a question' % (
                challenge.name,
                request.user.get_profile().name)
            # BUGFIX: the gate used to read "email_enabled or True", which is
            # always true and sent mail unconditionally; also dropped a
            # leftover debug print of the HTML body.
            if challenge.email_enabled:
                mail = EmailMultiAlternatives(subject, message, challenge.contact_email,
                                              [challenge.contact_email, ], headers={
                                              "Reply-To": request.user.email})
                mail.attach_alternative(html_message, 'text/html')
                mail.send()
            if request.is_ajax():
                return HttpResponse(json.dumps({"success": True}),
                                    mimetype="application/json")
    raise Http404
| 0.001871 |
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
from six.moves import winreg
from six import text_type
try:
import ctypes
from ctypes import wintypes
except ValueError:
# ValueError is raised on non-Windows systems for some horrible reason.
raise ImportError("Running tzwin on non-Windows system")
from ._common import tzrangebase
__all__ = ["tzwin", "tzwinlocal", "tzres"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
    """Return the registry path of the timezone database on this system.

    Prefers the Windows NT location and falls back to the 9x one when the
    NT key cannot be opened.
    """
    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
    try:
        winreg.OpenKey(handle, TZKEYNAMENT).Close()
        keyname = TZKEYNAMENT
    except WindowsError:
        keyname = TZKEYNAME9X
    handle.Close()
    return keyname
TZKEYNAME = _settzkeyname()
class tzres(object):
    """
    Class for accessing `tzres.dll`, which contains timezone name related
    resources.

    .. versionadded:: 2.5.0
    """
    p_wchar = ctypes.POINTER(wintypes.WCHAR)  # Pointer to a wide char

    def __init__(self, tzres_loc='tzres.dll'):
        # Load the user32 DLL so we can load strings from tzres
        user32 = ctypes.WinDLL('user32')
        # Specify the LoadStringW function
        user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
                                       wintypes.UINT,
                                       wintypes.LPWSTR,
                                       ctypes.c_int)
        self.LoadStringW = user32.LoadStringW
        self._tzres = ctypes.WinDLL(tzres_loc)
        self.tzres_loc = tzres_loc

    def load_name(self, offset):
        """
        Load a timezone name from a DLL offset (integer).

        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.load_name(112))
        'Eastern Standard Time'

        :param offset:
            A positive integer value referring to a string from the tzres dll.

        ..note:
            Offsets found in the registry are generally of the form
            `@tzres.dll,-114`. The offset in this case is 114, not -114.
        """
        resource = self.p_wchar()
        lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
        nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
        return resource[:nchar]

    def name_from_string(self, tzname_str):
        """
        Parse strings as returned from the Windows registry into the time zone
        name as defined in the registry.

        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.name_from_string('@tzres.dll,-251'))
        'Dateline Daylight Time'
        >>> print(tzr.name_from_string('Eastern Standard Time'))
        'Eastern Standard Time'

        :param tzname_str:
            A timezone name string as returned from a Windows registry key.

        :return:
            Returns the localized timezone string from tzres.dll if the string
            is of the form `@tzres.dll,-offset`, else returns the input string.
        """
        if not tzname_str.startswith('@'):
            return tzname_str
        name_splt = tzname_str.split(',-')
        try:
            offset = int(name_splt[1])
        except (IndexError, ValueError):
            # BUGFIX: narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise ValueError("Malformed timezone string.")
        return self.load_name(offset)
class tzwinbase(tzrangebase):
    """tzinfo class based on win32's timezones available in the registry.

    Abstract base: subclasses must populate the _std*/_dst* attributes
    (offsets, abbreviations and transition-rule fields) before use.
    """
    def __init__(self):
        raise NotImplementedError('tzwinbase is an abstract base class')
    def __eq__(self, other):
        # Compare on all relevant dimensions, including name.
        if not isinstance(other, tzwinbase):
            return NotImplemented
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._stddayofweek == other._stddayofweek and
                self._dstdayofweek == other._dstdayofweek and
                self._stdweeknumber == other._stdweeknumber and
                self._dstweeknumber == other._dstweeknumber and
                self._stdhour == other._stdhour and
                self._dsthour == other._dsthour and
                self._stdminute == other._stdminute and
                self._dstminute == other._dstminute and
                self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr)
    @staticmethod
    def list():
        """Return a list of all time zones known to the system."""
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
                result = [winreg.EnumKey(tzkey, i)
                          for i in range(winreg.QueryInfoKey(tzkey)[0])]
        return result
    def display(self):
        # Human-readable zone description as stored in the registry.
        return self._display
    def transitions(self, year):
        """
        For a given year, get the DST on and off transition times, expressed
        always on the standard time side. For zones with no transitions, this
        function returns ``None``.

        :param year:
            The year whose transitions you would like to query.

        :return:
            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
            ``(dston, dstoff)`` for zones with an annual DST transition, or
            ``None`` for fixed offset zones.
        """
        if not self.hasdst:
            return None
        dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)
        dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)
        # Ambiguous dates default to the STD side
        dstoff -= self._dst_base_offset
        return dston, dstoff
    def _get_hasdst(self):
        # A DST start month of 0 marks a fixed-offset (no-DST) zone.
        return self._dstmonth != 0
    @property
    def _dst_base_offset(self):
        # Difference between the DST and STD offsets (the "DST amount").
        return self._dst_base_offset_
class tzwin(tzwinbase):
    """tzinfo for a named Windows time zone read from the registry.

    The zone's offsets and transition rules are decoded from the binary
    ``TZI`` registry value of ``<TZKEYNAME>\\<name>``.
    """
    def __init__(self, name):
        self._name = name
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
            with winreg.OpenKey(handle, tzkeyname) as tzkey:
                keydict = valuestodict(tzkey)
        self._std_abbr = keydict["Std"]
        self._dst_abbr = keydict["Dlt"]
        self._display = keydict["Display"]
        # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
        # TZI layout: 3 longs (Bias, StandardBias, DaylightBias) followed by
        # two 8-short SYSTEMTIME structures (standard/daylight transitions).
        tup = struct.unpack("=3l16h", keydict["TZI"])
        stdoffset = -tup[0]-tup[1]          # Bias + StandardBias * -1
        dstoffset = stdoffset-tup[2]        # + DaylightBias * -1
        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)
        # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
        (self._stdmonth,
         self._stddayofweek,   # Sunday = 0
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]
        (self._dstmonth,
         self._dstdayofweek,   # Sunday = 0
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]
        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()
    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)
    def __reduce__(self):
        # Pickle by zone name; the registry is re-read on unpickling.
        return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
    """tzinfo for the machine's currently configured local Windows time zone.

    Reads ``TimeZoneInformation`` directly, so offsets and transition rules
    come as individual registry values rather than one packed TZI blob.
    """
    def __init__(self):
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
                keydict = valuestodict(tzlocalkey)
            self._std_abbr = keydict["StandardName"]
            self._dst_abbr = keydict["DaylightName"]
            try:
                # The Display string lives under the named-zone key, which may
                # be missing; fall back to None in that case.
                tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
                                                          sn=self._std_abbr)
                with winreg.OpenKey(handle, tzkeyname) as tzkey:
                    _keydict = valuestodict(tzkey)
                    self._display = _keydict["Display"]
            except OSError:
                self._display = None
        stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        dstoffset = stdoffset-keydict["DaylightBias"]
        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)
        # For reasons unclear, in this particular key, the day of week has been
        # moved to the END of the SYSTEMTIME structure.
        tup = struct.unpack("=8h", keydict["StandardStart"])
        (self._stdmonth,
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:5]
        self._stddayofweek = tup[7]
        tup = struct.unpack("=8h", keydict["DaylightStart"])
        (self._dstmonth,
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:5]
        self._dstdayofweek = tup[7]
        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()
    def __repr__(self):
        return "tzwinlocal()"
    def __str__(self):
        # str will return the standard name, not the daylight name.
        return "tzwinlocal(%s)" % repr(self._std_abbr)
    def __reduce__(self):
        # Stateless pickle: the local registry is re-read on unpickling.
        return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
    one_week = datetime.timedelta(days=7)
    first = datetime.datetime(year, month, 1, hour, minute)
    # Align day 1 onto the first occurrence of the requested weekday; this
    # works for both ISO (1-7) and Microsoft (0-6) numbering since 7 % 7 == 0.
    aligned = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
    candidate = aligned + (whichweek - 1) * one_week
    # "whichweek == 5" (last instance) may overshoot into the next month.
    if candidate.month != month:
        candidate -= one_week
    return candidate
def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    result = {}
    n_values = winreg.QueryInfoKey(key)[1]
    tz_res = None
    for idx in range(n_values):
        value_name, value, dtype = winreg.EnumValue(key, idx)
        if dtype in (winreg.REG_DWORD, winreg.REG_DWORD_LITTLE_ENDIAN):
            # Registry DWORDs are unsigned 32-bit; reinterpret as signed.
            if value & (1 << 31):
                value = value - (1 << 32)
        elif dtype == winreg.REG_SZ:
            # If it's a reference to the tzres DLL, load the actual string
            if value.startswith('@tzres'):
                tz_res = tz_res or tzres()
                value = tz_res.name_from_string(value)
            value = value.rstrip('\x00')  # Remove trailing nulls
        result[value_name] = value
    return result
| 0.000618 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
feed2db works to turn text-based feed list into database
"""
# @author chengdujin
# @contact chengdujin@gmail.com
# @created Jul. 30, 2013
import sys
reload(sys)
sys.setdefaultencoding('UTF-8')
sys.path.append('../..')
from config.settings import Collection
from config.settings import db
# CONSTANTS
from config.settings import FEED_REGISTRAR
#FILE_PREFIX = '/home/work/newsman/newsman/bin/text_based_feeds/feed_lists/'
#FILE_PREFIX = '/home/users/jinyuan/newsman/newsman/bin/text_based_feeds
# /feed_lists/'
#FILE_PREFIX = '/home/ubuntu/newsman/newsman/bin/text_based_feeds/feed_lists/'
#FILE_PREFIX = '/home/jinyuan/Downloads/newsman/newsman/bin/text_based_feeds
# /feed_lists/'
def _parse_task(line):
"""
read *_feeds_list.txt
"""
line = line.strip()
if line:
task = line.strip().split('*|*')
# task[1] refers to categories
if len(task) == 5:
return task[0].strip(), task[1].strip(), task[2].strip(), task[
3].strip(), task[4].strip(), None
else:
return task[0].strip(), task[1].strip(), task[2].strip(), task[
3].strip(), task[4].strip(), task[5].strip()
else:
return None
def _convert(language='en', country=None):
"""
turn text-based feed infor into database items
Note. 1. categories: [(), ()]
"""
# read in file content
feeds_list = open('%s%s_%s_feeds_list' %
(FILE_PREFIX, language, country), 'r')
lines = feeds_list.readlines()
feeds_list.close()
link_dict = title_dict = {}
for line in lines:
if line.strip():
language, category, transcoder, link, title, labels = _parse_task(
line)
# link check
if link not in link_dict:
link_dict[link] = title
else:
print link, title
# title check
if title not in title_dict:
title_dict[title] = link
else:
print title, link
if __name__ == "__main__":
if len(sys.argv) > 1:
_convert(sys.argv[1], sys.argv[2])
else:
print 'Please indicate a language and country'
| 0.003552 |
#!/usr/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
"""
The mem command provides direct access to read and write physical memory.
"""
import os
import time
import chipsec_util
import chipsec.defines
import chipsec.file
from chipsec.logger import print_buffer
from chipsec.command import BaseCommand
def read_mem(pa, size = chipsec.defines.BOUNDARY_4KB):
    """Read `size` bytes of physical memory at `pa`; returns None on failure."""
    try:
        buffer = chipsec_util._cs.mem.read_physical_mem( pa, size )
    except Exception:
        # BUGFIX: narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit and made the dump loop unabortable.
        buffer = None
    return buffer
def dump_region_to_path(path, pa_start, pa_end):
    """Dump physical memory [pa_start, pa_end) into files under `path`.

    The region is read in 4KB-aligned pages; each contiguous readable run is
    appended to a file named m<start_PA>.bin.  An unreadable page closes the
    current file, so every gap starts a new output file.
    """
    pa = (pa_start + chipsec.defines.ALIGNED_4KB) & ~chipsec.defines.ALIGNED_4KB
    end = pa_end & ~chipsec.defines.ALIGNED_4KB
    # unaligned head/tail byte counts before/after the aligned middle part
    head_len = pa - pa_start
    tail_len = pa_end - end
    # f is the file of the current contiguous run (None = no open run)
    f = None
    # read leading bytes to the next boundary
    if (head_len > 0):
        b = read_mem(pa_start, head_len)
        if b is not None:
            fname = os.path.join(path, "m%016X.bin" % pa_start)
            f = open(fname, 'wb')
            f.write(b)
    # read the aligned middle of the region one 4KB page at a time
    while pa < end:
        b = read_mem(pa)
        if b is not None:
            if f is None:
                fname = os.path.join(path, "m%016X.bin" % pa)
                f = open(fname, 'wb')
            f.write(b)
        else:
            # unreadable page: terminate the current run, if any
            if f is not None:
                f.close()
                f = None
        pa += chipsec.defines.BOUNDARY_4KB
    # read trailing bytes
    if (tail_len > 0):
        b = read_mem(end, tail_len)
        if b is not None:
            if f is None:
                fname = os.path.join(path, "m%016X.bin" % end)
                f = open(fname, 'wb')
            f.write(b)
    if f is not None:
        f.close()
# Physical Memory
class MemCommand(BaseCommand):
    """
    >>> chipsec_util mem <op> <physical_address> <length> [value|buffer_file]
    >>>
    >>> <physical_address> : 64-bit physical address
    >>> <op>               : read|readval|write|writeval|allocate|pagedump
    >>> <length>           : byte|word|dword or length of the buffer from <buffer_file>
    >>> <value>            : byte, word or dword value to be written to memory at <physical_address>
    >>> <buffer_file>      : file with the contents to be written to memory at <physical_address>

    Examples:

    >>> chipsec_util mem <op> <physical_address> <length> [value|file]
    >>> chipsec_util mem readval 0xFED40000 dword
    >>> chipsec_util mem read 0x41E 0x20 buffer.bin
    >>> chipsec_util mem writeval 0xA0000 dword 0x9090CCCC
    >>> chipsec_util mem write 0x100000000 0x1000 buffer.bin
    >>> chipsec_util mem write 0x100000000 0x10 000102030405060708090A0B0C0D0E0F
    >>> chipsec_util mem allocate 0x1000
    >>> chipsec_util mem pagedump 0xFED00000 0x100000
    >>> chipsec_util mem search 0xF0000 0x10000 _SM_
    """
    def requires_driver(self):
        # No driver required when printing the util documentation
        if len(self.argv) < 3:
            return False
        return True
    def run(self):
        # default read length in bytes when <length> is omitted
        size = 0x100
        if len(self.argv) < 3:
            print MemCommand.__doc__
            return
        op = self.argv[2]
        t = time.time()
        # dispatch on the requested sub-operation
        if 'allocate' == op and 4 == len(self.argv):
            # allocate <size>: reserve physical memory, report VA/PA pair
            size = int(self.argv[3],16)
            (va, pa) = self.cs.mem.alloc_physical_mem( size )
            self.logger.log( '[CHIPSEC] Allocated %X bytes of physical memory: VA = 0x%016X, PA = 0x%016X' % (size, va, pa) )
        elif 'search' == op and len(self.argv) > 5:
            # search <pa> <size> <pattern>: find a byte pattern in memory
            phys_address = int(self.argv[3],16)
            size = int(self.argv[4],16)
            buffer = self.cs.mem.read_physical_mem( phys_address, size )
            offset = buffer.find(self.argv[5])
            if offset != -1:
                self.logger.log( '[CHIPSEC] search buffer from memory: PA = 0x%016X, len = 0x%X, target address= 0x%X..' % (phys_address, size, phys_address + offset) )
            else:
                self.logger.log( '[CHIPSEC] search buffer from memory: PA = 0x%016X, len = 0x%X, can not find the target in the searched range..' % (phys_address, size) )
        elif 'pagedump' == op and len(self.argv) > 3:
            # pagedump <pa> [<len>]: dump a region to m<PA>.bin files
            start = long(self.argv[3],16)
            length = long(self.argv[4],16) if len(self.argv) > 4 else chipsec.defines.BOUNDARY_4KB
            end = start + length
            dump_region_to_path( chipsec.file.get_main_dir(), start, end )
        elif 'read' == op:
            # read <pa> [<len>] [<file>]: dump bytes to screen or a file
            phys_address = int(self.argv[3],16)
            size = int(self.argv[4],16) if len(self.argv) > 4 else 0x100
            self.logger.log( '[CHIPSEC] reading buffer from memory: PA = 0x%016X, len = 0x%X..' % (phys_address, size) )
            buffer = self.cs.mem.read_physical_mem( phys_address, size )
            if len(self.argv) > 5:
                buf_file = self.argv[5]
                chipsec.file.write_file( buf_file, buffer )
                self.logger.log( "[CHIPSEC] written 0x%X bytes to '%s'" % (len(buffer), buf_file) )
            else:
                print_buffer( buffer )
        elif 'readval' == op:
            # readval <pa> [byte|word|dword]: read a single scalar value
            phys_address = int(self.argv[3],16)
            width = 0x4
            if len(self.argv) > 4:
                width = chipsec_util.get_option_width(self.argv[4]) if chipsec_util.is_option_valid_width(self.argv[4]) else int(self.argv[4],16)
            self.logger.log( '[CHIPSEC] reading %X-byte value from PA 0x%016X..' % (width, phys_address) )
            if   0x1 == width: value = self.cs.mem.read_physical_mem_byte ( phys_address )
            elif 0x2 == width: value = self.cs.mem.read_physical_mem_word ( phys_address )
            elif 0x4 == width: value = self.cs.mem.read_physical_mem_dword( phys_address )
            self.logger.log( '[CHIPSEC] value = 0x%X' % value )
        elif 'write' == op:
            # write <pa> <len> <hexstring|file>: write a buffer to memory
            phys_address = int(self.argv[3],16)
            if len(self.argv) > 4:
                size = int(self.argv[4],16)
            else:
                self.logger.error( "must specify <length> argument in 'mem write'" )
                return
            if len(self.argv) > 5:
                buf_file = self.argv[5]
                if not os.path.exists( buf_file ):
                    # argument is not a file: treat it as an inline hex string
                    #buffer = buf_file.decode('hex')
                    try:
                        buffer = bytearray.fromhex(buf_file)
                    except ValueError, e:
                        self.logger.error( "incorrect <value> specified: '%s'" % buf_file )
                        self.logger.error( str(e) )
                        return
                    self.logger.log( "[CHIPSEC] read 0x%X hex bytes from command-line: %s'" % (len(buffer), buf_file) )
                else:
                    buffer = chipsec.file.read_file( buf_file )
                    self.logger.log( "[CHIPSEC] read 0x%X bytes from file '%s'" % (len(buffer), buf_file) )
                if len(buffer) < size:
                    self.logger.error( "number of bytes read (0x%X) is less than the specified <length> (0x%X)" % (len(buffer),size) )
                    return
                self.logger.log( '[CHIPSEC] writing buffer to memory: PA = 0x%016X, len = 0x%X..' % (phys_address, size) )
                self.cs.mem.write_physical_mem( phys_address, size, buffer )
            else:
                self.logger.error( "must specify <buffer>|<file> argument in 'mem write'" )
                return
        elif 'writeval' == op:
            # writeval <pa> <byte|word|dword> <value>: write a single scalar
            phys_address = int(self.argv[3],16)
            if len(self.argv) > 4:
                width = chipsec_util.get_option_width(self.argv[4]) if chipsec_util.is_option_valid_width(self.argv[4]) else int(self.argv[4],16)
            else:
                self.logger.error( "must specify <length> argument in 'mem writeval' as one of %s" % chipsec_util.CMD_OPTS_WIDTH )
                return
            if len(self.argv) > 5:
                value = int(self.argv[5],16)
            else:
                self.logger.error( "must specify <value> argument in 'mem writeval'" )
                return
            self.logger.log( '[CHIPSEC] writing %X-byte value 0x%X to PA 0x%016X..' % (width, value, phys_address) )
            if   0x1 == width: self.cs.mem.write_physical_mem_byte ( phys_address, value )
            elif 0x2 == width: self.cs.mem.write_physical_mem_word ( phys_address, value )
            elif 0x4 == width: self.cs.mem.write_physical_mem_dword( phys_address, value )
        else:
            print MemCommand.__doc__
            return
        self.logger.log( "[CHIPSEC] (mem) time elapsed %.3f" % (time.time()-t) )
commands = { 'mem': MemCommand }
| 0.018573 |
# -*- coding: utf-8 -*-
import pytest
from django.utils import timezone
from nose.tools import * # noqa
from framework.auth.core import Auth
from osf_tests.factories import AuthUserFactory, ProjectFactory, UserFactory
from scripts import parse_citation_styles
from tests.base import OsfTestCase
from osf.models import OSFUser as User, AbstractNode as Node
from website.citations.utils import datetime_to_csl
from website.util import api_url_for
pytestmark = pytest.mark.django_db
class CitationsUtilsTestCase(OsfTestCase):
    def test_datetime_to_csl(self):
        # datetime_to_csl should emit CSL's date-variable schema.
        moment = timezone.now()
        expected = {'date-parts': [[moment.year, moment.month, moment.day]]}
        assert_equal(datetime_to_csl(moment), expected)
class CitationsNodeTestCase(OsfTestCase):
    """CSL output for Node instances: single, multiple and non-visible
    contributors."""
    def setUp(self):
        super(CitationsNodeTestCase, self).setUp()
        self.node = ProjectFactory()
    def tearDown(self):
        super(CitationsNodeTestCase, self).tearDown()
        # Clear factory-created DB state between tests.
        Node.remove()
        User.remove()
    def test_csl_single_author(self):
        # Nodes with one contributor generate valid CSL-data
        assert_equal(
            self.node.csl,
            {
                'publisher': 'Open Science Framework',
                'author': [{
                    'given': self.node.creator.given_name,
                    'family': self.node.creator.family_name,
                }],
                'URL': self.node.display_absolute_url,
                'issued': datetime_to_csl(self.node.logs.latest().date),
                'title': self.node.title,
                'type': 'webpage',
                'id': self.node._id,
            },
        )
    def test_csl_multiple_authors(self):
        # Nodes with multiple contributors generate valid CSL-data
        user = UserFactory()
        self.node.add_contributor(user)
        self.node.save()
        assert_equal(
            self.node.csl,
            {
                'publisher': 'Open Science Framework',
                'author': [
                    {
                        'given': self.node.creator.given_name,
                        'family': self.node.creator.family_name,
                    },
                    {
                        'given': user.given_name,
                        'family': user.family_name,
                    }
                ],
                'URL': self.node.display_absolute_url,
                'issued': datetime_to_csl(self.node.logs.latest().date),
                'title': self.node.title,
                'type': 'webpage',
                'id': self.node._id,
            },
        )
    def test_non_visible_contributors_arent_included_in_csl(self):
        # Contributors added with visible=False must not appear as authors.
        node = ProjectFactory()
        visible = UserFactory()
        node.add_contributor(visible, auth=Auth(node.creator))
        invisible = UserFactory()
        node.add_contributor(invisible, auth=Auth(node.creator), visible=False)
        node.save()
        assert_equal(len(node.csl['author']), 2)
        expected_authors = [
            contrib.csl_name for contrib in [node.creator, visible]
        ]
        assert_equal(node.csl['author'], expected_authors)
class CitationsUserTestCase(OsfTestCase):
    def setUp(self):
        super(CitationsUserTestCase, self).setUp()
        self.user = UserFactory()
    def tearDown(self):
        super(CitationsUserTestCase, self).tearDown()
        User.remove()
    def test_user_csl(self):
        # A User's csl_name must follow CSL's name-variable schema.
        expected = {
            'given': self.user.given_name,
            'family': self.user.family_name,
        }
        assert_equal(self.user.csl_name, expected)
class CitationsViewsTestCase(OsfTestCase):
@pytest.fixture(autouse=True)
def _parsed_citation_styles(self):
# populate the DB with parsed citation styles
try:
parse_citation_styles.main()
except OSError:
pass
def test_list_styles(self):
# Response includes a list of available citation styles
response = self.app.get(api_url_for('list_citation_styles'))
assert_true(response.json)
assert_equal(
len(
[
style for style in response.json['styles']
if style.get('id') == 'bibtex'
]
),
1,
)
def test_list_styles_filter(self):
# Response includes a list of available citation styles
response = self.app.get(api_url_for('list_citation_styles', q='bibtex'))
assert_true(response.json)
assert_equal(
len(response.json['styles']), 1
)
assert_equal(
response.json['styles'][0]['id'], 'bibtex'
)
def test_node_citation_view(self):
node = ProjectFactory()
user = AuthUserFactory()
node.add_contributor(user)
node.save()
response = self.app.get("/api/v1" + "/project/" + node._id + "/citation/", auto_follow=True, auth=user.auth)
assert_true(response.json)
| 0.000777 |
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
try:
    import codecs
except ImportError:
    # Without codecs, encoding support in the file handlers below is
    # silently disabled (see BaseRotatingHandler.__init__).
    codecs = None
try:
    unicode
    _unicode = True
except NameError:
    # Interpreter built without the unicode type: treat all text as str.
    _unicode = False
#
# Some constants...
#
# Default ports for the network logging handlers in this module; a
# matching receiver (e.g. a socket server that unpickles LogRecords)
# is expected to listen on these.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Well-known syslog ports.
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.

    Subclasses must provide shouldRollover(record) and doRollover().
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging.

        mode and encoding are remembered on the instance so the stream
        can be reopened with the same settings after a rollover.
        """
        # An encoding cannot be honoured without the codecs module.
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.
        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            # Roll the file over first if this record would not fit.
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Anything else goes through the standard error machinery.
            self.handleError(record)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Log to a file, switching to a fresh file once the current one would
    grow past maxBytes.

    Backups are kept as "<name>.1" (newest) through "<name>.<backupCount>"
    (oldest); the file being written is always "<name>".  For example,
    with backupCount=5 and base name "app.log", rollover closes and
    renames "app.log" to "app.log.1", first shifting any existing
    "app.log.1", "app.log.2", ... up one slot.  With maxBytes == 0 the
    file is never rotated.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        Rollover occurs whenever writing a record would push the file
        past maxBytes; backupCount controls how many rotated files are
        retained (see the class docstring for the naming scheme).
        """
        # Rotation only makes sense in append mode: honouring e.g. 'w'
        # would truncate the log on every run of the calling application,
        # losing the output of previous runs.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover: shift the backups up one slot and reopen a fresh
        base file (truncating it).
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift <name>.<i> -> <name>.<i+1>, oldest first, discarding
            # whatever currently occupies the target slot.
            for idx in range(self.backupCount - 1, 0, -1):
                src = "%s.%d" % (self.baseFilename, idx)
                dst = "%s.%d" % (self.baseFilename, idx + 1)
                if os.path.exists(src):
                    if os.path.exists(dst):
                        os.remove(dst)
                    os.rename(src, dst)
            dst = self.baseFilename + ".1"
            if os.path.exists(dst):
                os.remove(dst)
            os.rename(self.baseFilename, dst)
        self.mode = 'w'
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Return 1 if appending the formatted *record* would make the file
        exceed maxBytes, else 0.
        """
        if self.stream is None:                 # delayed open
            self.stream = self._open()
        if self.maxBytes <= 0:                  # rotation disabled
            return 0
        msg = "%s\n" % self.format(record)
        # Seek to the end explicitly: on Windows tell() can be stale
        # after writes (non-POSIX-compliant behaviour).
        self.stream.seek(0, 2)
        if self.stream.tell() + len(msg) >= self.maxBytes:
            return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.
    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        """
        Open *filename* in append mode and schedule the first rollover.

        when and interval select the rollover cadence (see the table in
        the comments below); utc selects gmtime vs localtime for both
        the rotated-file suffix and the midnight/weekly calculations.
        If the file already exists, its mtime seeds the schedule, so a
        process restart does not reset the rollover clock.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # suffix names rotated files; extMatch lets getFilesToDelete()
        # recognise them later.
        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        # Base the first rollover on the file's mtime when it already
        # exists, otherwise on "now".
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # If a DST boundary falls between now and the
                        # computed rollover, shift by an hour so we still
                        # roll over at local midnight.
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.
        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.
        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        # Collect files named "<base>.<timestamp>" whose suffix matches
        # the pattern for our rollover cadence.
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        # Keep the newest backupCount files; everything older is returned
        # for deletion.  Fewer files than backupCount means nothing to do.
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        self.mode = 'w'
        self.stream = self._open()
        currentTime = int(time.time())
        # Advance the schedule past "now" in whole intervals (the process
        # may have been idle across several of them).
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A FileHandler that notices when its file has been moved or replaced
    (typically by newsyslog or logrotate) and reopens it before writing.

    A change is detected by comparing the (device, inode) pair reported
    by os.stat against the values recorded when the file was opened.
    Intended for Unix: on Windows an open log file cannot be renamed out
    from under the process (logging holds exclusive locks) and ST_INO is
    always zero, so this handler is unnecessary there.

    Based on a suggestion and patch by Chad J. Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        """Open the file and remember its device/inode identity."""
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        if os.path.exists(self.baseFilename):
            st = os.stat(self.baseFilename)
            self.dev, self.ino = st[ST_DEV], st[ST_INO]
        else:
            # Delayed open (or file removed before we stat it): sentinel
            # values force a reopen-and-restat on the first emit.
            self.dev, self.ino = -1, -1

    def emit(self, record):
        """
        Emit a record, first reopening the stream if the underlying file
        has been moved, removed or replaced since the last write.
        """
        if os.path.exists(self.baseFilename):
            st = os.stat(self.baseFilename)
            changed = (st[ST_DEV] != self.dev) or (st[ST_INO] != self.ino)
        else:
            st = None
            changed = 1
        if changed and self.stream is not None:
            self.stream.flush()
            self.stream.close()
            self.stream = self._open()
            if st is None:
                # The file vanished; _open() recreated it, so stat again.
                st = os.stat(self.baseFilename)
            self.dev, self.ino = st[ST_DEV], st[ST_INO]
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    Ships logging records, pickled, over a persistent TCP connection.

    Only the record's attribute dictionary (__dict__) is pickled, so the
    receiving end does not need the logging module to decode the payload;
    use logging.makeLogRecord there to rebuild a LogRecord.  If the peer
    resets the connection, reconnection is attempted lazily on subsequent
    logging calls.
    """
    def __init__(self, host, port):
        """
        Remember the target address; the socket is opened on demand.

        closeOnError defaults to 0, meaning errors go through the normal
        Handler error machinery.  Set it to 1 to have an error silently
        close the socket so the next logging call reconnects.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        # Exponential backoff schedule for reconnection attempts: first
        # retry after retryStart seconds, multiplied by retryFactor on
        # each failure, capped at retryMax.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        Factory for the underlying socket; subclasses override this to
        choose a different socket type (see DatagramHandler).
        """
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(conn, 'settimeout'):
            conn.settimeout(timeout)
        conn.connect((self.host, self.port))
        return conn

    def createSocket(self):
        """
        Try to open the connection, honouring the backoff schedule.
        Thanks to Robert Olson for the original patch (SF #815911),
        since slightly refactored.
        """
        now = time.time()
        # retryTime is None on the first call after a disconnect;
        # otherwise only attempt once the scheduled time has arrived.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None  # connected: no delay before next attempt
        except socket.error:
            # Connection failed: push the next attempt further out.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = self.retryPeriod * self.retryFactor
                if self.retryPeriod > self.retryMax:
                    self.retryPeriod = self.retryMax
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Write the byte string *s* to the socket, coping with partial
        sends when sendall is unavailable.  While disconnected (and the
        retry time has not yet arrived) the data is silently dropped.
        """
        if self.sock is None:
            self.createSocket()
        if not self.sock:
            # Still disconnected: drop this record.
            return
        try:
            if hasattr(self.sock, "sendall"):
                self.sock.sendall(s)
            else:
                offset = 0
                remaining = len(s)
                while remaining > 0:
                    count = self.sock.send(s[offset:])
                    offset = offset + count
                    remaining = remaining - count
        except socket.error:
            self.sock.close()
            self.sock = None  # so createSocket runs on the next call

    def makePickle(self, record):
        """
        Return the record's __dict__ pickled (protocol 1) and prefixed
        with its length as a big-endian 32-bit integer, ready to send.
        """
        exc = record.exc_info
        if exc:
            # format() caches the traceback text on the record; then we
            # strip exc_info because traceback objects cannot be pickled.
            dummy = self.format(record)
            record.exc_info = None
        payload = cPickle.dumps(record.__dict__, 1)
        if exc:
            record.exc_info = exc  # restore for handlers further down
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging (most likely a lost connection):
        either quietly drop the socket so the next emit reconnects, or
        defer to the base implementation.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Pickle the record and write it to the socket.  On socket trouble
        the packet is silently dropped and the connection re-established
        on a later call.
        """
        try:
            self.send(self.makePickle(record))
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Close the socket (if open) and then the handler itself.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)
class DatagramHandler(SocketHandler):
    """
    Like SocketHandler, but over UDP: each pickled record (the
    LogRecord's __dict__ only, rebuildable with logging.makeLogRecord)
    is sent as a single datagram.  UDP guarantees neither delivery nor
    ordering, so no partial-send handling is attempted.
    """
    def __init__(self, host, port):
        """Record the destination address."""
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """Create a UDP (SOCK_DGRAM) socket instead of the parent's TCP one."""
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Transmit *s* as one datagram, creating the socket lazily on the
        first call.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG = 0 # system is unusable
    LOG_ALERT = 1 # action must be taken immediately
    LOG_CRIT = 2 # critical conditions
    LOG_ERR = 3 # error conditions
    LOG_WARNING = 4 # warning conditions
    LOG_NOTICE = 5 # normal but significant condition
    LOG_INFO = 6 # informational
    LOG_DEBUG = 7 # debug-level messages

    # facility codes
    LOG_KERN = 0 # kernel messages
    LOG_USER = 1 # random user-level messages
    LOG_MAIL = 2 # mail system
    LOG_DAEMON = 3 # system daemons
    LOG_AUTH = 4 # security/authorization messages
    LOG_SYSLOG = 5 # messages generated internally by syslogd
    LOG_LPR = 6 # line printer subsystem
    LOG_NEWS = 7 # network news subsystem
    LOG_UUCP = 8 # UUCP subsystem
    LOG_CRON = 9 # clock daemon
    LOG_AUTHPRIV = 10 # security/authorization messages (private)
    LOG_FTP = 11 # FTP daemon

    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16 # reserved for local use
    LOG_LOCAL1 = 17 # reserved for local use
    LOG_LOCAL2 = 18 # reserved for local use
    LOG_LOCAL3 = 19 # reserved for local use
    LOG_LOCAL4 = 20 # reserved for local use
    LOG_LOCAL5 = 21 # reserved for local use
    LOG_LOCAL6 = 22 # reserved for local use
    LOG_LOCAL7 = 23 # reserved for local use

    # string aliases accepted by encodePriority() for the priorities above
    priority_names = {
        "alert": LOG_ALERT,
        "crit": LOG_CRIT,
        "critical": LOG_CRIT,
        "debug": LOG_DEBUG,
        "emerg": LOG_EMERG,
        "err": LOG_ERR,
        "error": LOG_ERR, # DEPRECATED
        "info": LOG_INFO,
        "notice": LOG_NOTICE,
        "panic": LOG_EMERG, # DEPRECATED
        "warn": LOG_WARNING, # DEPRECATED
        "warning": LOG_WARNING,
        }

    # string aliases accepted by encodePriority() for the facilities above
    facility_names = {
        "auth": LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron": LOG_CRON,
        "daemon": LOG_DAEMON,
        "ftp": LOG_FTP,
        "kern": LOG_KERN,
        "lpr": LOG_LPR,
        "mail": LOG_MAIL,
        "news": LOG_NEWS,
        "security": LOG_AUTH, # DEPRECATED
        "syslog": LOG_SYSLOG,
        "user": LOG_USER,
        "uucp": LOG_UUCP,
        "local0": LOG_LOCAL0,
        "local1": LOG_LOCAL1,
        "local2": LOG_LOCAL2,
        "local3": LOG_LOCAL3,
        "local4": LOG_LOCAL4,
        "local5": LOG_LOCAL5,
        "local6": LOG_LOCAL6,
        "local7": LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=socket.SOCK_DGRAM):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, basestring):
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            # Network syslog: socktype selects UDP (default, no connect
            # needed) or TCP (connect immediately).
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
        self.formatter = None

    def _connect_unixsocket(self, address):
        # syslog may require either DGRAM or STREAM sockets, so try
        # datagram first and fall back to a stream connection.
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)

    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required.  this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, basestring):
            facility = self.facility_names[facility]
        if isinstance(priority, basestring):
            priority = self.priority_names[priority]
        # syslog PRI value: facility in the high bits, priority in the
        # low 3 bits (see the header-comment above).
        return (facility << 3) | priority

    def close (self):
        """
        Closes the socket.
        """
        if self.unixsocket:
            self.socket.close()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).

        Unknown level names fall back to "warning".
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        # NUL-terminate: required by (at least) the unix-domain
        # /dev/log socket (see log_format_string above).
        msg = self.format(record) + '\000'
        """
        We need to convert record level to lowercase, maybe this will
        change in the future.
        """
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        # Message is a string. Convert to bytes as required by RFC 5424
        if type(msg) is unicode:
            msg = msg.encode('utf-8')
            if codecs:
                msg = codecs.BOM_UTF8 + msg
        # PRI header precedes the (possibly BOM-prefixed) message body.
        msg = prio + msg
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    # syslogd may have been restarted; reconnect and retry once.
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.

        mailhost may be a bare host name or a (host, port) tuple for a
        non-standard SMTP port.  credentials, when supplied, is a
        (username, password) tuple used to authenticate.  secure, used
        only together with credentials, requests TLS and is passed
        directly to SMTP.starttls(): an empty tuple, (keyfile,), or
        (keyfile, certfile).  A single address string may be given for
        toaddrs; it is normalised to a list.
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            # self.password is deliberately left unset here; emit() only
            # reads it after checking self.username.
            self.username = None
        self.fromaddr = fromaddr
        self.toaddrs = [toaddrs] if isinstance(toaddrs, basestring) else toaddrs
        self.subject = subject
        self.secure = secure

    def getSubject(self, record):
        """
        Determine the subject for the email.

        Returns the fixed subject given at construction time; override
        for a record-dependent subject line.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record: format it and mail it to the configured addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport or smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            body = self.format(record)
            message = ("From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s"
                       % (self.fromaddr,
                          ",".join(self.toaddrs),
                          self.getSubject(record),
                          formatdate(),
                          body))
            if self.username:
                if self.secure is not None:
                    # STARTTLS requires an EHLO both before and after.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, message)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    Sends events to the NT Event Log, registering *appname* in the
    registry as an event source.  When no dllname is supplied,
    win32service.pyd (which carries some basic message placeholders) is
    used; that keeps setup simple but makes the event log large, since
    the whole message source is stored with each event.  Pass your own
    message DLL for slimmer logs.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, found two directory levels
                # above the win32evtlogutil module.
                head = os.path.split(self._welu.__file__)
                head = os.path.split(head[0])
                dllname = os.path.join(head[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # logging level -> NT event type; unknown levels fall back to
            # deftype via getEventType().
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # No pywin32: degrade to a no-op handler (emit checks _welu).
            print("The Python Win32 extensions for NT (service, event "\
                "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record.  This implementation
        always returns 1, the base message ID in win32service.pyd.  When
        using your own message catalogue, override this (e.g. pass an ID
        as the log message and look it up here).
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record; 0 unless overridden.
        """
        return 0

    def getEventType(self, record):
        """
        Map record.levelno to an NT event type via the typemap attribute
        set up in __init__ (DEBUG/INFO/WARNING/ERROR/CRITICAL), falling
        back to self.deftype.  Override this method or replace typemap
        when using custom logging levels.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Determine the message ID, category and type, then report the
        formatted record to the NT event log.  A no-op when the Win32
        extensions were not importable.
        """
        if not self._welu:
            return
        try:
            msg_id = self.getMessageID(record)
            category = self.getEventCategory(record)
            ev_type = self.getEventType(record)
            text = self.format(record)
            self._welu.ReportEvent(self.appname, msg_id, category,
                                   ev_type, [text])
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        The registry entry is deliberately left in place: removing the
        event source would prevent the Event Log Viewer from resolving
        the message DLL for events already logged.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A handler that ships each log record to a web server, using either
    GET or POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the
        method ("GET" or "POST"; case-insensitive).
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ("GET", "POST"):
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method

    def mapLogRecord(self, record):
        """
        Map the log record onto the dict that is sent as the CGI data.
        Override in a subclass to customise. Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the web server as a percent-encoded dictionary.
        """
        try:
            import httplib, urllib
            host = self.host
            conn = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the data to any query string already present
                sep = '&' if url.find('?') >= 0 else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # Support multiple hosts on one IP address: the Host header
            # must not carry the optional :port suffix
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            conn.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            conn.endheaders(data if self.method == "POST" else None)
            conn.getreply()    #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    A handler that accumulates logging records in an in-memory buffer.
    After each record is appended, shouldFlush() is consulted; when it
    answers true, flush() is expected to deal with the buffered records.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the given buffer capacity.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Report whether the handler should flush its buffer.

        True once the buffer has reached capacity; override this method
        to implement a custom flushing strategy.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Buffer the record, then call flush() if shouldFlush() says so.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version simply empties the buffer.
        """
        self.buffer = []

    def close(self):
        """
        Close the handler: flush, then chain to the parent class' close().
        """
        self.flush()
        logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    A BufferingHandler that periodically forwards its buffered records to
    a target handler. A flush happens whenever the buffer fills up, or
    when a record of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize with the buffer capacity, the severity level that
        forces a flush, and an optional target handler.

        Note that without a target — set either here or later via
        setTarget() — a MemoryHandler discards whatever it flushes!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Flush when the buffer is full, or on a record at flushLevel or
        higher.
        """
        return (len(self.buffer) >= self.capacity or
                record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the handler that receives the buffered records on flush.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means handing the buffered records
        to the target handler, if there is one. Override for different
        behaviour. Without a target the buffer is left untouched.
        """
        if self.target:
            for buffered in self.buffer:
                self.target.handle(buffered)
            self.buffer = []

    def close(self):
        """
        Flush, drop the target reference and close via the base class.
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)
| 0.003544 |
# -*- coding: utf-8 -*-
"""
Common Utilities run most requests
"""
# =============================================================================
# Special local requests (e.g. from scheduler)
#
if request.is_local:
    # This is a request made from the local server
    f = get_vars.get("format", None)
    auth_token = get_vars.get("subscription", None)
    if auth_token and f == "msg":
        # Subscription lookup request (see S3Notify.notify())
        # Resolve the subscription auth_token to the subscriber's user
        # account, joining subscription-resource -> subscription -> user
        rtable = s3db.pr_subscription_resource
        stable = s3db.pr_subscription
        utable = s3db.pr_person_user
        join = [stable.on(stable.id == rtable.subscription_id),
                utable.on(utable.pe_id == stable.pe_id)]
        user = db(rtable.auth_token == auth_token).select(utable.user_id,
                                                          join=join,
                                                          limitby=(0, 1)) \
                                                  .first()
        if user:
            # Impersonate subscriber
            auth.s3_impersonate(user.user_id)
        else:
            # Anonymous request
            # NOTE(review): an unknown token silently degrades to an
            # anonymous session rather than failing — presumably
            # deliberate; verify against S3Notify.notify()
            auth.s3_impersonate(None)
# =============================================================================
# Check Permissions & fail as early as we can
#
# Set user roles
# - requires access to tables
auth.s3_set_roles()

# Check access to this controller
# - failing here stops all further request processing for callers
#   without at least "read" permission
if not auth.permission.has_permission("read"):
    auth.permission.fail()

# =============================================================================
# Initialize Date/Time Settings
#
s3base.s3_get_utc_offset()
# =============================================================================
# Menus
#
from s3layouts import *
import s3menus as default_menus

S3MainMenu = default_menus.S3MainMenu
S3OptionsMenu = default_menus.S3OptionsMenu

current.menu = Storage(oauth="", options=None, override={})

# Menus are only built for interactive (HTML) views
# BUG FIX: ("html") is just the string "html", so the original membership
# test was a substring check (e.g. format "htm" or "ml" matched too);
# a one-element tuple makes it an exact match
if auth.permission.format in ("html",):
    theme = settings.get_theme()
    package = "applications.%s.modules.templates.%%s.menus" % appname

    # Candidate template packages to look up menu overrides in, in order
    menu_locations = []
    if theme != "default":
        if s3.theme_location:
            theme = "%s.%s" % (s3.theme_location[:-1], theme)
        menu_locations.append(theme)
    else:
        template = settings.get_template()
        if isinstance(template, (tuple, list)):
            menu_locations.extend(template)
        else:
            menu_locations.append(template)

    for name in menu_locations:
        if name == "default":
            # Using s3menus.py
            continue
        try:
            deployment_menus = __import__(package % name,
                                          fromlist=["S3MainMenu",
                                                    "S3OptionsMenu",
                                                    ],
                                          )
        except ImportError:
            # No menus.py (using except is faster than os.stat)
            continue
        else:
            # A template may override either or both menu classes
            if hasattr(deployment_menus, "S3MainMenu"):
                S3MainMenu = deployment_menus.S3MainMenu
            if hasattr(deployment_menus, "S3OptionsMenu"):
                S3OptionsMenu = deployment_menus.S3OptionsMenu

    # Instantiate main menu
    main = S3MainMenu.menu()
else:
    main = None

menu = current.menu
menu["main"] = main

# Override controller menus
# @todo: replace by current.menu.override
s3_menu_dict = {}
# -----------------------------------------------------------------------------
def s3_rest_controller(prefix=None, resourcename=None, **attr):
    """
        Helper function to apply the S3Resource REST interface

        @param prefix: the application prefix (defaults to the
                       current request controller)
        @param resourcename: the resource name, without prefix
                             (defaults to the current request function)
        @param attr: additional keyword parameters

        Any keyword parameters will be copied into the output dict (provided
        that the output is a dict). If a keyword parameter is callable, then
        it will be invoked, and its return value will be added to the output
        dict instead. The callable receives the S3Request as its first and
        only parameter.

        CRUD can be configured per table using:
            s3db.configure(tablename, **attr)

        *** Redirection:
        create_next             URL to redirect to after a record has been created
        update_next             URL to redirect to after a record has been updated
        delete_next             URL to redirect to after a record has been deleted

        *** Form configuration:
        list_fields             list of names of fields to include into list views
        subheadings             Sub-headings (see separate documentation)
        listadd                 Enable/Disable add-form in list views

        *** CRUD configuration:
        editable                Allow/Deny record updates in this table
        deletable               Allow/Deny record deletions in this table
        insertable              Allow/Deny record insertions into this table
        copyable                Allow/Deny record copying within this table

        *** Callbacks:
        create_onvalidation     Function for additional record validation on create
        create_onaccept         Function after successful record insertion
        update_onvalidation     Function for additional record validation on update
        update_onaccept         Function after successful record update
        onvalidation            Fallback for both create_onvalidation and update_onvalidation
        onaccept                Fallback for both create_onaccept and update_onaccept
        ondelete                Function after record deletion
    """
    # Customise Controller from Template
    attr = settings.customise_controller("%s_%s" % (prefix or request.controller,
                                                    resourcename or request.function),
                                         **attr)
    # Parse the request
    r = s3_request(prefix, resourcename)
    # Customize target resource(s) from Template
    r.customise_resource()
    # Configure standard method handlers
    set_handler = r.set_handler
    from s3db.cms import S3CMS
    set_handler("cms", S3CMS)
    set_handler("compose", s3base.S3Compose)
    # @ToDo: Make work in Component Tabs:
    set_handler("copy", lambda r, **attr: \
                               redirect(URL(args="create",
                                            vars={"from_record":r.id})))
    set_handler("deduplicate", s3base.S3Merge)
    set_handler("filter", s3base.S3Filter)
    set_handler("hierarchy", s3base.S3HierarchyCRUD)
    set_handler("import", s3base.S3Importer)
    set_handler("xform", s3base.S3XForms)
    set_handler("map", s3base.S3Map)
    set_handler("profile", s3base.S3Profile)
    set_handler("report", s3base.S3Report)
    # Register "report" a second time for transformable (non-interactive)
    # representations
    set_handler("report", s3base.S3Report, transform=True)
    set_handler("timeplot", s3base.S3TimePlot)
    set_handler("grouped", s3base.S3GroupedItemsReport)
    set_handler("search_ac", s3base.search_ac)
    set_handler("summary", s3base.S3Summary)
    # Don't load S3PDF unless needed (very slow import with Reportlab)
    method = r.method
    if method == "import" and r.representation == "pdf":
        from s3.s3pdf import S3PDF
        set_handler("import", S3PDF(),
                    http = ("GET", "POST"),
                    representation="pdf")
    # Plugin OrgRoleManager when appropriate
    s3base.S3OrgRoleManager.set_method(r)
    # Execute the request
    output = r(**attr)
    # Post-process interactive list-type outputs: attach the default
    # action buttons unless the view has configured its own (s3.actions)
    if isinstance(output, dict) and \
       method in (None,
                  "report",
                  "search",
                  "datatable",
                  "datatable_f",
                  "summary"):
        if s3.actions is None:
            # Add default action buttons
            # NOTE: r.target() re-binds prefix/name to the target resource,
            # deliberately shadowing the function parameters (which are no
            # longer needed from here on)
            prefix, name, table, tablename = r.target()
            authorised = s3_has_permission("update", tablename)
            # If a component has components itself, then action buttons
            # can be forwarded to the native controller by setting native=True
            if r.component and s3db.has_components(table):
                native = output.get("native", False)
            else:
                native = False
            # Get table config
            get_config = s3db.get_config
            listadd = get_config(tablename, "listadd", True)
            # Records are only editable if the table allows it AND the
            # user is not restricted by ownership rules
            editable = get_config(tablename, "editable", True) and \
                       not auth.permission.ownership_required("update", table)
            deletable = get_config(tablename, "deletable", True)
            copyable = get_config(tablename, "copyable", False)
            # URL to open the resource
            open_url = r.resource.crud._linkto(r,
                                               authorised=authorised,
                                               update=editable,
                                               native=native)("[id]")
            # Add action buttons for Open/Delete/Copy as appropriate
            s3_action_buttons(r,
                              deletable=deletable,
                              copyable=copyable,
                              editable=editable,
                              read_url=open_url,
                              update_url=open_url
                              # To use modals
                              #update_url="%s.popup?refresh=list" % open_url
                              )
            # Override Add-button, link to native controller and put
            # the primary key into get_vars for automatic linking
            if native and not listadd and \
               s3_has_permission("create", tablename):
                label = s3base.S3CRUD.crud_string(tablename,
                                                  "label_create")
                hook = r.resource.components[name]
                fkey = "%s.%s" % (name, hook.fkey)
                get_vars_copy = get_vars.copy()
                get_vars_copy.update({fkey: r.record[hook.fkey]})
                url = URL(prefix, name, args=["create"], vars=get_vars_copy)
                add_btn = A(label, _href=url, _class="action-btn")
                output.update(add_btn=add_btn)
    elif method not in ("import",
                        "review",
                        "approve",
                        "reject",
                        "deduplicate"):
        # Reset the action button configuration for all other methods
        s3.actions = None
    # Wrap the output in the guided-tour builder when requested (?tour=...)
    if get_vars.tour:
        output = s3db.tour_builder(output)
    return output

# Enable access to this function from modules
current.rest_controller = s3_rest_controller

# END =========================================================================
| 0.00196 |
# -*- coding:utf-8 -*-
import datetime
import json
from flask import abort
from flask import current_app
from werkzeug.exceptions import BadRequest
from api.extensions import db
from api.extensions import rd
from api.lib.cmdb.cache import AttributeCache
from api.lib.cmdb.cache import CITypeCache
from api.lib.cmdb.ci_type import CITypeAttributeManager
from api.lib.cmdb.ci_type import CITypeManager
from api.lib.cmdb.const import CMDB_QUEUE
from api.lib.cmdb.const import ExistPolicy
from api.lib.cmdb.const import OperateType
from api.lib.cmdb.const import REDIS_PREFIX_CI
from api.lib.cmdb.const import RetKey
from api.lib.cmdb.history import AttributeHistoryManger
from api.lib.cmdb.history import CIRelationHistoryManager
from api.lib.cmdb.search.ci.db.query_sql import QUERY_CIS_BY_IDS
from api.lib.cmdb.search.ci.db.query_sql import QUERY_CIS_BY_VALUE_TABLE
from api.lib.cmdb.utils import TableMap
from api.lib.cmdb.utils import ValueTypeMap
from api.lib.cmdb.value import AttributeValueManager
from api.lib.decorator import kwargs_required
from api.lib.utils import handle_arg_list
from api.models.cmdb import CI
from api.models.cmdb import CIRelation
from api.models.cmdb import CITypeAttribute
from api.models.cmdb import CITypeRelation
from api.tasks.cmdb import ci_cache
from api.tasks.cmdb import ci_delete
from api.tasks.cmdb import ci_relation_cache
from api.tasks.cmdb import ci_relation_delete
class CIManager(object):
    """CRUD manager for CI (configuration item) instances.

    Wraps the CI model with cache-aware reads, unique-attribute-aware
    writes, and celery task dispatch to keep the redis cache and the
    relation graph in sync.
    """

    def __init__(self):
        pass

    @staticmethod
    def get_type_name(ci_id):
        # Return the CIType name of the given CI; 404 when the CI is missing
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not existed".format(ci_id))
        return CITypeCache.get(ci.type_id).name

    @staticmethod
    def confirm_ci_existed(ci_id):
        # Return the CI instance, aborting the request with 404 when absent
        return CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not existed".format(ci_id))

    @classmethod
    def get_ci_by_id(cls, ci_id, ret_key=RetKey.NAME, fields=None, need_children=True):
        """Read a single CI, preferring the redis cache.

        :param ci_id:
        :param ret_key: name, id, or alias
        :param fields: attribute list
        :param need_children: also embed the directly-related child CIs
        :return: dict of attribute values plus _id/_type/ci_type meta keys
        """
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not existed".format(ci_id))
        res = dict()
        if need_children:
            children = CIRelationManager.get_children(ci_id, ret_key=ret_key)  # one floor
            res.update(children)
        ci_type = CITypeCache.get(ci.type_id)
        res["ci_type"] = ci_type.name
        # BUG FIX: get_cis_by_ids() returns a *list* of dicts; the original
        # passed the list straight to dict.update(), which raises TypeError
        # for any non-empty result. Merge the (single) CI dict instead.
        cis = cls.get_cis_by_ids([str(ci_id)], fields=fields, ret_key=ret_key)
        if cis:
            res.update(cis[0])
        res['_type'] = ci_type.id
        res['_id'] = ci_id
        return res

    @staticmethod
    def get_ci_by_id_from_db(ci_id, ret_key=RetKey.NAME, fields=None, need_children=True, use_master=False):
        """Read a single CI directly from the database, bypassing the cache.

        :param ci_id:
        :param ret_key: name, id or alias
        :param fields: attribute name list; defaults to all attributes of
                       the CI's type
        :param need_children: also embed the directly-related child CIs
        :param use_master: whether to use the master db
        :return: dict of attribute values plus ci_id/type_id/ci_type keys
        """
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not existed".format(ci_id))
        res = dict()
        if need_children:
            children = CIRelationManager.get_children(ci_id, ret_key=ret_key)  # one floor
            res.update(children)
        ci_type = CITypeCache.get(ci.type_id)
        res["ci_type"] = ci_type.name
        fields = CITypeAttributeManager.get_attr_names_by_type_id(ci.type_id) if not fields else fields
        unique_key = AttributeCache.get(ci_type.unique_id)
        _res = AttributeValueManager().get_attr_values(fields,
                                                       ci_id,
                                                       ret_key=ret_key,
                                                       unique_key=unique_key,
                                                       use_master=use_master)
        res.update(_res)
        res['type_id'] = ci_type.id
        res['ci_id'] = ci_id
        return res

    def get_ci_by_ids(self, ci_id_list, ret_key=RetKey.NAME, fields=None):
        # Per-id reads (including children); use get_cis_by_ids() for a
        # single batched read without children
        return [self.get_ci_by_id(ci_id, ret_key=ret_key, fields=fields) for ci_id in ci_id_list]

    @classmethod
    def get_cis_by_type(cls, type_id, ret_key=RetKey.NAME, fields="", page=1, per_page=None):
        """Paginated read of all non-deleted CIs of one type.

        :return: (total number found, page, list of CI dicts)
        """
        cis = db.session.query(CI.id).filter(CI.type_id == type_id).filter(CI.deleted.is_(False))
        numfound = cis.count()
        cis = cis.offset((page - 1) * per_page).limit(per_page)
        ci_ids = [str(ci.id) for ci in cis]
        res = cls.get_cis_by_ids(ci_ids, ret_key, fields)
        return numfound, page, res

    @staticmethod
    def ci_is_exist(unique_key, unique_value):
        """Look up a CI by the value of its type's unique attribute.

        :param unique_key: the unique Attribute instance (not its name)
        :param unique_value:
        :return: the CI instance, or None when no CI carries that value
        """
        value_table = TableMap(attr_name=unique_key.name).table
        unique = value_table.get_by(attr_id=unique_key.id,
                                    value=unique_value,
                                    to_dict=False,
                                    first=True)
        if unique:
            return CI.get_by_id(unique.ci_id)

    @staticmethod
    def _delete_ci_by_id(ci_id):
        # Hard-delete the bare CI row; no attribute/relation cleanup
        ci = CI.get_by_id(ci_id)
        ci.delete()  # TODO: soft delete

    @classmethod
    def add(cls, ci_type_name, exist_policy=ExistPolicy.REPLACE, _no_attribute_policy=ExistPolicy.IGNORE, **ci_dict):
        """Create or update a CI, identified by its unique attribute value.

        :param ci_type_name:
        :param exist_policy: replace or reject or need
        :param _no_attribute_policy: ignore or reject (unknown attributes)
        :param ci_dict: attribute name -> value
        :return: the CI id
        """
        ci_type = CITypeManager.check_is_existed(ci_type_name)
        unique_key = AttributeCache.get(ci_type.unique_id) or abort(400, 'illegality unique attribute')
        # The unique value may arrive keyed by name, alias or attribute id
        unique_value = ci_dict.get(unique_key.name)
        unique_value = unique_value or ci_dict.get(unique_key.alias)
        unique_value = unique_value or ci_dict.get(unique_key.id)
        unique_value = unique_value or abort(400, '{0} missing'.format(unique_key.name))
        existed = cls.ci_is_exist(unique_key, unique_value)
        if existed is not None:
            if exist_policy == ExistPolicy.REJECT:
                return abort(400, 'CI is already existed')
            if existed.type_id != ci_type.id:
                # The unique value moved to another type: re-type the CI
                existed.update(type_id=ci_type.id)
            ci = existed
        else:
            if exist_policy == ExistPolicy.NEED:
                return abort(404, 'CI <{0}> does not exist'.format(unique_value))
            ci = CI.create(type_id=ci_type.id)
        ci_type_attrs_name = [attr["name"] for attr in CITypeAttributeManager().get_attributes_by_type_id(ci_type.id)]
        value_manager = AttributeValueManager()
        for p, v in ci_dict.items():
            if p not in ci_type_attrs_name:
                current_app.logger.warning('ci_type: {0} not has attribute {1}, please check!'.format(ci_type_name, p))
                continue
            try:
                value_manager.create_or_update_attr_value(p, v, ci, _no_attribute_policy)
            except BadRequest as e:
                # Roll back a half-created CI, but never delete one that
                # already existed before this call
                if existed is None:
                    cls.delete(ci.id)
                raise e
        ci_cache.apply_async([ci.id], queue=CMDB_QUEUE)
        return ci.id

    def update(self, ci_id, **ci_dict):
        """Update attribute values of an existing CI (404 when missing).

        Unknown attribute names are logged and skipped; a BadRequest from
        the value manager propagates to the caller.
        """
        ci = self.confirm_ci_existed(ci_id)
        ci_type_attrs_name = [attr["name"] for attr in CITypeAttributeManager().get_attributes_by_type_id(ci.type_id)]
        value_manager = AttributeValueManager()
        for p, v in ci_dict.items():
            if p not in ci_type_attrs_name:
                current_app.logger.warning('ci_type: {0} not has attribute {1}, please check!'.format(ci.type_id, p))
                continue
            value_manager.create_or_update_attr_value(p, v, ci)
        ci_cache.apply_async([ci_id], queue=CMDB_QUEUE)

    @staticmethod
    def update_unique_value(ci_id, unique_name, unique_value):
        # Rewrite the unique attribute of a CI and refresh its cache entry
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not found".format(ci_id))
        AttributeValueManager().create_or_update_attr_value(unique_name, unique_value, ci)
        ci_cache.apply_async([ci_id], queue=CMDB_QUEUE)

    @staticmethod
    def delete(ci_id):
        """Delete a CI together with its attribute values and relations.

        Relation-cache and ci-cache cleanups are dispatched asynchronously.
        :return: the deleted ci_id
        """
        ci = CI.get_by_id(ci_id) or abort(404, "CI <{0}> is not found".format(ci_id))
        attrs = CITypeAttribute.get_by(type_id=ci.type_id, to_dict=False)
        attr_names = set([AttributeCache.get(attr.attr_id).name for attr in attrs])
        for attr_name in attr_names:
            value_table = TableMap(attr_name=attr_name).table
            for item in value_table.get_by(ci_id=ci_id, to_dict=False):
                item.delete()
        for item in CIRelation.get_by(first_ci_id=ci_id, to_dict=False):
            ci_relation_delete.apply_async(args=(item.first_ci_id, item.second_ci_id), queue=CMDB_QUEUE)
            item.delete()
        for item in CIRelation.get_by(second_ci_id=ci_id, to_dict=False):
            ci_relation_delete.apply_async(args=(item.first_ci_id, item.second_ci_id), queue=CMDB_QUEUE)
            item.delete()
        ci.delete()  # TODO: soft delete
        AttributeHistoryManger.add(ci_id, [(None, OperateType.DELETE, None, None)])
        ci_delete.apply_async([ci.id], queue=CMDB_QUEUE)
        return ci_id

    @staticmethod
    def add_heartbeat(ci_type, unique_value):
        # Stamp the heartbeat of the CI identified by its unique value
        ci_type = CITypeManager().check_is_existed(ci_type)
        unique_key = AttributeCache.get(ci_type.unique_id)
        value_table = TableMap(attr_name=unique_key.name).table
        v = value_table.get_by(attr_id=unique_key.id,
                               value=unique_value,
                               to_dict=False,
                               first=True) \
            or abort(404, "not found")
        ci = CI.get_by_id(v.ci_id) or abort(404, "CI <{0}> is not found".format(v.ci_id))
        ci.update(heartbeat=datetime.datetime.now())

    @classmethod
    @kwargs_required("type_id", "page")
    def get_heartbeat(cls, **kwargs):
        """Paginated heartbeat/agent status per CI.

        agent_status: -1 = never reported, 0 = expired, 1 = alive
        :return: (total found, list of (hostname, first private_ip,
                  ci_type, agent_status) tuples)
        """
        query = db.session.query(CI.id, CI.heartbeat).filter(CI.deleted.is_(False))
        # Heartbeats older than 72 minutes count as expired
        expire = datetime.datetime.now() - datetime.timedelta(minutes=72)
        type_ids = handle_arg_list(kwargs["type_id"])
        query = query.filter(CI.type_id.in_(type_ids))
        page = kwargs.get("page")
        agent_status = kwargs.get("agent_status")
        if agent_status == -1:
            query = query.filter(CI.heartbeat.is_(None))
        elif agent_status == 0:
            query = query.filter(CI.heartbeat <= expire)
        elif agent_status == 1:
            query = query.filter(CI.heartbeat > expire)
        numfound = query.count()
        per_page_count = current_app.config.get("DEFAULT_PAGE_COUNT")
        cis = query.offset((page - 1) * per_page_count).limit(per_page_count).all()
        ci_ids = [ci.id for ci in cis]
        heartbeat_dict = {}
        for ci in cis:
            if agent_status is not None:
                # Already filtered: every row has the requested status
                heartbeat_dict[ci.id] = agent_status
            else:
                if ci.heartbeat is None:
                    heartbeat_dict[ci.id] = -1
                elif ci.heartbeat <= expire:
                    heartbeat_dict[ci.id] = 0
                else:
                    heartbeat_dict[ci.id] = 1
        current_app.logger.debug(heartbeat_dict)
        ci_ids = list(map(str, ci_ids))
        res = cls.get_cis_by_ids(ci_ids, fields=["hostname", "private_ip"])
        # NOTE(review): this keys on "_id", which only the cache path of
        # get_cis_by_ids() provides (the db path uses "ci_id") — verify
        result = [(i.get("hostname"), i.get("private_ip")[0], i.get("ci_type"),
                   heartbeat_dict.get(i.get("_id"))) for i in res
                  if i.get("private_ip")]
        return numfound, result

    @staticmethod
    def _get_cis_from_cache(ci_ids, ret_key=RetKey.NAME, fields=None):
        # Batched cache read; returns None on any miss (or non-NAME ret_key)
        # so the caller can fall back to the database
        res = rd.get(ci_ids, REDIS_PREFIX_CI)
        if res is not None and None not in res and ret_key == RetKey.NAME:
            res = list(map(json.loads, res))
            if not fields:
                return res
            else:
                # Project each cached CI down to the requested fields,
                # always keeping the meta keys
                _res = []
                for d in res:
                    _d = dict()
                    _d["_id"], _d["_type"] = d.get("_id"), d.get("_type")
                    _d["ci_type"] = d.get("ci_type")
                    for field in fields:
                        _d[field] = d.get(field)
                    _res.append(_d)
                return _res

    @staticmethod
    def _get_cis_from_db(ci_ids, ret_key=RetKey.NAME, fields=None, value_tables=None):
        # Batched database read across all (or the given) value tables
        if not fields:
            filter_fields_sql = ""
        else:
            _fields = list()
            for field in fields:
                attr = AttributeCache.get(field)
                if attr is not None:
                    _fields.append(str(attr.id))
            filter_fields_sql = "WHERE A.attr_id in ({0})".format(",".join(_fields))
        ci_ids = ",".join(ci_ids)
        if value_tables is None:
            value_tables = ValueTypeMap.table_name.values()
        value_sql = " UNION ".join([QUERY_CIS_BY_VALUE_TABLE.format(value_table, ci_ids)
                                    for value_table in value_tables])
        query_sql = QUERY_CIS_BY_IDS.format(filter_fields_sql, value_sql)
        # current_app.logger.debug(query_sql)
        cis = db.session.execute(query_sql).fetchall()
        ci_set = set()
        res = list()
        ci_dict = dict()
        # Rows arrive grouped by ci_id; start a new dict on each new CI
        for ci_id, type_id, attr_id, attr_name, attr_alias, value, value_type, is_list in cis:
            if ci_id not in ci_set:
                ci_dict = dict()
                ci_type = CITypeCache.get(type_id)
                ci_dict["ci_id"] = ci_id
                # BUG FIX: the original wrote type_id into "ci_type" and
                # immediately overwrote it with the name; expose the id
                # under "type_id" (as get_ci_by_id_from_db does)
                ci_dict["type_id"] = type_id
                ci_dict["ci_type"] = ci_type.name
                ci_dict["ci_type_alias"] = ci_type.alias
                ci_set.add(ci_id)
                res.append(ci_dict)
            if ret_key == RetKey.NAME:
                attr_key = attr_name
            elif ret_key == RetKey.ALIAS:
                attr_key = attr_alias
            elif ret_key == RetKey.ID:
                attr_key = attr_id
            else:
                return abort(400, "invalid ret key")
            value = ValueTypeMap.serialize2[value_type](value)
            if is_list:
                ci_dict.setdefault(attr_key, []).append(value)
            else:
                ci_dict[attr_key] = value
        return res

    @classmethod
    def get_cis_by_ids(cls, ci_ids, ret_key=RetKey.NAME, fields=None, value_tables=None):
        """Batched CI read: redis cache first, database on any cache miss.

        :param ci_ids: list of CI instance IDs, e.g. ['1', '2']
        :param ret_key: name, id or alias
        :param fields:
        :param value_tables:
        :return: list of CI dicts
        """
        if not ci_ids:
            return []
        fields = [] if fields is None or not isinstance(fields, list) else fields
        ci_id_tuple = tuple(map(int, ci_ids))
        res = cls._get_cis_from_cache(ci_id_tuple, ret_key, fields)
        if res is not None:
            return res
        current_app.logger.warning("cache not hit...............")
        return cls._get_cis_from_db(ci_ids, ret_key, fields, value_tables)
class CIRelationManager(object):
    """
    Manage relations (directed edges) between CIs.
    """

    def __init__(self):
        pass

    @classmethod
    def get_children(cls, ci_id, ret_key=RetKey.NAME):
        # Direct children of a CI, grouped by the child's CIType name
        second_cis = CIRelation.get_by(first_ci_id=ci_id, to_dict=False)
        second_ci_ids = (second_ci.second_ci_id for second_ci in second_cis)
        ci_type2ci_ids = dict()
        for child_id in second_ci_ids:
            type_id = CI.get_by_id(child_id).type_id
            ci_type2ci_ids.setdefault(type_id, []).append(child_id)
        res = {}
        for type_id in ci_type2ci_ids:
            ci_type = CITypeCache.get(type_id)
            children = CIManager.get_cis_by_ids(list(map(str, ci_type2ci_ids[type_id])), ret_key=ret_key)
            res[ci_type.name] = children
        return res

    @staticmethod
    def get_second_cis(first_ci_id, relation_type_id=None, page=1, per_page=None):
        """Paginated read of the child CIs of *first_ci_id*.

        :param per_page: page size, or the string "all" to disable paging
        :return: (total found, number returned, list of CI dicts)
        """
        second_cis = db.session.query(CI.id).filter(CI.deleted.is_(False)).join(
            CIRelation, CIRelation.second_ci_id == CI.id).filter(
            CIRelation.first_ci_id == first_ci_id).filter(CIRelation.deleted.is_(False))
        if relation_type_id is not None:
            second_cis = second_cis.filter(CIRelation.relation_type_id == relation_type_id)
        numfound = second_cis.count()
        if per_page != "all":
            second_cis = second_cis.offset((page - 1) * per_page).limit(per_page).all()
        ci_ids = [str(son.id) for son in second_cis]
        result = CIManager.get_cis_by_ids(ci_ids)
        return numfound, len(ci_ids), result

    @staticmethod
    def _sort_handler(sort_by, query_sql):
        """Apply "+attr"/"-attr" ordering to a CI id query.

        Returns the query unchanged when the attribute is unknown.
        """
        if sort_by.startswith("+"):
            sort_type = "asc"
            sort_by = sort_by[1:]
        elif sort_by.startswith("-"):
            sort_type = "desc"
            sort_by = sort_by[1:]
        else:
            # No prefix: ascending by default
            sort_type = "asc"
        attr = AttributeCache.get(sort_by)
        if attr is None:
            return query_sql
        attr_id = attr.id
        value_table = TableMap(attr_name=sort_by).table
        ci_table = query_sql.subquery()
        # NOTE(review): ci_table is a subquery, so the deleted-filter below
        # likely needs ci_table.c.deleted rather than ci_table.deleted —
        # verify against callers before changing
        query_sql = db.session.query(ci_table.c.id, value_table.value).join(
            value_table, value_table.ci_id == ci_table.c.id).filter(
            value_table.attr_id == attr_id).filter(ci_table.deleted.is_(False)).order_by(
            getattr(value_table.value, sort_type)())
        return query_sql

    @classmethod
    def get_first_cis(cls, second_ci, relation_type_id=None, page=1, per_page=None):
        """Paginated read of the parent CIs of *second_ci*.

        :param per_page: page size, or the string "all" to disable paging
        :return: (total found, number returned, list of CI dicts)
        """
        first_cis = db.session.query(CIRelation.first_ci_id).filter(
            CIRelation.second_ci_id == second_ci).filter(CIRelation.deleted.is_(False))
        if relation_type_id is not None:
            first_cis = first_cis.filter(CIRelation.relation_type_id == relation_type_id)
        numfound = first_cis.count()
        if per_page != "all":
            first_cis = first_cis.offset((page - 1) * per_page).limit(per_page).all()
        first_ci_ids = [str(first_ci.first_ci_id) for first_ci in first_cis]
        result = CIManager.get_cis_by_ids(first_ci_ids)
        return numfound, len(first_ci_ids), result

    @classmethod
    def add(cls, first_ci_id, second_ci_id, more=None, relation_type_id=None, many_to_one=False):
        """Create (or update) the relation first_ci -> second_ci.

        :param more: optional extra payload stored on the relation
        :param relation_type_id: defaults to the type-level relation between
                                 the two CIs' types (404 when none exists)
        :param many_to_one: when True, first drop any existing relation of
                            this type pointing at second_ci
        :return: the CIRelation id
        """
        first_ci = CIManager.confirm_ci_existed(first_ci_id)
        second_ci = CIManager.confirm_ci_existed(second_ci_id)
        existed = CIRelation.get_by(first_ci_id=first_ci_id,
                                    second_ci_id=second_ci_id,
                                    to_dict=False,
                                    first=True)
        if existed is not None:
            if existed.relation_type_id != relation_type_id and relation_type_id is not None:
                existed.update(relation_type_id=relation_type_id)
                CIRelationHistoryManager().add(existed, OperateType.UPDATE)
        else:
            if relation_type_id is None:
                # Derive the relation type from the CIType-level relation
                type_relation = CITypeRelation.get_by(parent_id=first_ci.type_id,
                                                      child_id=second_ci.type_id,
                                                      first=True,
                                                      to_dict=False)
                relation_type_id = type_relation and type_relation.relation_type_id
                relation_type_id or abort(404, "Relation {0} <-> {1} is not found".format(
                    first_ci.ci_type.name, second_ci.ci_type.name))
            if many_to_one:
                # A child may only have one parent of this relation type:
                # soft-delete the competing relations first
                for item in CIRelation.get_by(second_ci_id=second_ci_id,
                                              relation_type_id=relation_type_id,
                                              to_dict=False):
                    item.soft_delete()
                    his_manager = CIRelationHistoryManager()
                    his_manager.add(item, operate_type=OperateType.DELETE)
                    ci_relation_delete.apply_async(args=(item.first_ci_id, item.second_ci_id), queue=CMDB_QUEUE)
            existed = CIRelation.create(first_ci_id=first_ci_id,
                                        second_ci_id=second_ci_id,
                                        relation_type_id=relation_type_id)
            CIRelationHistoryManager().add(existed, OperateType.ADD)
            ci_relation_cache.apply_async(args=(first_ci_id, second_ci_id), queue=CMDB_QUEUE)
        if more is not None:
            # BUG FIX: was `existed.upadte(...)` — an AttributeError
            # whenever a `more` payload was supplied
            existed.update(more=more)
        return existed.id

    @staticmethod
    def delete(cr_id):
        """Delete a relation by its id (404 when missing)."""
        cr = CIRelation.get_by_id(cr_id) or abort(404, "CIRelation <{0}> is not existed".format(cr_id))
        cr.delete()
        his_manager = CIRelationHistoryManager()
        his_manager.add(cr, operate_type=OperateType.DELETE)
        ci_relation_delete.apply_async(args=(cr.first_ci_id, cr.second_ci_id), queue=CMDB_QUEUE)
        return cr_id

    @classmethod
    def delete_2(cls, first_ci_id, second_ci_id):
        """Delete the relation identified by its endpoint CI ids."""
        # ROBUSTNESS: abort with 404 instead of dereferencing None below
        # when no such relation exists
        cr = CIRelation.get_by(first_ci_id=first_ci_id,
                               second_ci_id=second_ci_id,
                               to_dict=False,
                               first=True) or abort(404, "CIRelation is not existed")
        ci_relation_delete.apply_async(args=(first_ci_id, second_ci_id), queue=CMDB_QUEUE)
        return cls.delete(cr.id)

    @classmethod
    def batch_update(cls, ci_ids, parents):
        """Re-parent a batch of CIs (many-to-one relations only).

        :param ci_ids: the child CI ids
        :param parents: dict of {unique attribute name: parent unique value}
        """
        # TableMap is already imported at module level
        if parents is not None and isinstance(parents, dict):
            for attr_name in parents:
                if parents[attr_name]:
                    attr = AttributeCache.get(attr_name)
                    value_table = TableMap(attr_name=attr.name).table
                    parent = value_table.get_by(attr_id=attr.id, value=parents[attr_name], first=True, to_dict=False)
                    if not parent:
                        return abort(404, "{0}: {1} is not found".format(attr_name, parents[attr_name]))
                    parent_id = parent.ci_id
                    for ci_id in ci_ids:
                        cls.add(parent_id, ci_id, many_to_one=True)
| 0.003839 |
from numpy import *
import random
import logging
import argparse
from random import randint
import os
import os.path
import time
from AbstractSampler import AbstractSampler
def myArgmin(A):
    """Index of the minimum of the 1-D array A, ties broken uniformly at random."""
    candidates = nonzero(A == A.min())[0]
    return candidates[randint(0, candidates.shape[0] - 1)]
def myArgmax(A):
    """Index of the maximum of the 1-D array A, ties broken uniformly at random."""
    candidates = nonzero(A == A.max())[0]
    return candidates[randint(0, candidates.shape[0] - 1)]
class armTree:
    """
    Partitions a list of arm indices into batches ("arm groups") of roughly
    batch_size arms each, supporting the removal of beaten arms from a
    batch and the re-merging of shrunken batches.
    """

    def __init__(self, iArms, batch_size=4):
        """
        Split iArms into ceil(len(iArms)/batch_size) contiguous groups;
        the last group absorbs the remainder.
        """
        self.batch_size = int(batch_size)
        self.armGroups = []
        nAG = int(ceil(float(len(iArms)) / self.batch_size))
        Inds = batch_size * arange(nAG + 1)
        Inds[-1] = len(iArms)
        for i in range(len(Inds) - 1):
            self.armGroups.append([iArms[j] for j in range(Inds[i], Inds[i + 1])])

    def pruneGroup(self, i, UCB):
        """
        Remove from batch i every arm whose row in UCB contains an entry
        below 0.5, and report whether anything was removed.

        UCB must be the square matrix of upper confidence bounds for the
        arms currently in batch i (one row/column per arm, same order).
        """
        group = self.armGroups[i]
        if len(group) != UCB.shape[0]:
            # BUG FIX: the original concatenated the message so that "%"
            # bound only to the last fragment, raising TypeError; use
            # logging's lazy argument formatting instead
            logging.info("ERROR: The size of the batch and the dimensions "
                         "of UCB matrix do NOT match up. Batch = %s and "
                         "matrix is = %s", group, UCB)
        losers, _ = nonzero(UCB < 0.5)
        # BUG FIX: pop each losing index once, in descending order;
        # popping in ascending order shifted the not-yet-popped indices
        for ind in sorted(set(losers.tolist()), reverse=True):
            group.pop(ind)
        return losers.shape[0] > 0

    def mergeGroups(self):
        """
        Re-partition the batches: shuffle, sort by size, then repeatedly
        pair the smallest remaining batch with the largest — unless the
        pair would exceed 1.5 * batch_size, in which case the large batch
        is kept on its own.
        """
        oldAG = self.armGroups[:]
        random.shuffle(oldAG)
        # key=len replaces the Python-2-only cmp= argument (same stable order)
        oldAG.sort(key=len)
        self.armGroups = []
        i = 0
        j = len(oldAG) - 1
        while i <= j:
            if i == j:
                self.armGroups.append(oldAG[i])
                break
            elif len(oldAG[i]) + len(oldAG[j]) > self.batch_size * 1.5:
                self.armGroups.append(oldAG[j])
                j = j - 1
            else:
                self.armGroups.append(oldAG[i] + oldAG[j])
                i = i + 1
                j = j - 1

    def mergePairOfBatches(self, i, j):
        """Append batch j's arms onto batch i and remove batch j."""
        self.armGroups[i] = self.armGroups[i] + self.armGroups.pop(j)

    def numArms(self):
        """Total number of arms across all batches."""
        return sum(len(ag) for ag in self.armGroups)

    def __getitem__(self, key):
        """Cyclic batch access: the index wraps modulo the number of batches."""
        return self.armGroups[key % len(self.armGroups)]

    def __len__(self):
        """Number of batches."""
        return len(self.armGroups)

    def index(self, batch):
        """Position of *batch* in the batch list (ValueError when absent)."""
        return self.armGroups.index(batch)
class mergeRUCBSampler(AbstractSampler):
    """mergeRUCB dueling-bandit sampler.

    Keeps the arms partitioned into batches (an ``armTree``) and, each
    round, picks a champion and a challenger from the current batch using
    upper confidence bounds on the pairwise win probabilities.  Beaten
    arms are pruned and batches are merged as the survivor pool shrinks.
    """
    def __init__(self, arms=[], arg_str="", run_count=""):
        """Parse options out of ``arg_str`` and initialize the win/play
        statistics, optionally resuming from a previously saved .npz file.

        NOTE(review): ``arms`` is a mutable default argument and is kept
        by reference (and possibly shuffled in place) below -- a caller's
        list may be reordered.
        """
        parser = argparse.ArgumentParser(prog=self.__class__.__name__)
        parser.add_argument("--sampler", type=str)
        parser.add_argument("--RUCB_alpha_parameter", type=float, default=0.5)
        parser.add_argument("--mergeRUCB_batch_size", type=int, default=4)
        parser.add_argument("--mergeRUCB_delta", type=float, default=0.01)
        parser.add_argument("--continue_sampling_experiment", type=str,
                            default="No")
        parser.add_argument("--old_output_dir", type=str, default="")
        parser.add_argument("--old_output_prefix", type=str, default="")
        args = vars(parser.parse_known_args(arg_str.split())[0])
        self.nArms = len(arms) # Number of arms
        self.initArms = arms[:]
        self.lArms = arms # Arms as a list of arms
        if args["continue_sampling_experiment"] != "Yes":
            random.shuffle(self.lArms)
        self.iArms = range(self.nArms) # The indices of the arms
        self.dictArms = dict(zip(self.lArms,self.iArms))
        # A dictionary taking arms to their indices.
        # Pairwise statistics: wins, play counts (initialized to one win
        # each way), empirical win probabilities, and 1/sqrt(plays)
        # cached for the UCB exploration term.
        self.RealWins = ones([self.nArms, self.nArms])
        self.numPlays = 2*ones([self.nArms, self.nArms])
        self.PMat = self.RealWins / self.numPlays
        self.invSqrtNumPlays = 1./sqrt(self.numPlays)
        logging.info("Number of arms = %d" % self.nArms)
        logging.info("Set of arms: %s" % arms)
        self.alpha = args["RUCB_alpha_parameter"]
        self.batchSize = args["mergeRUCB_batch_size"]
        self.delta = args["mergeRUCB_delta"] # Prob of failure
        self.tArms = armTree(self.iArms,self.batchSize)
        self.UCB = ones([self.batchSize,self.batchSize])
        self.currentBatch = 0
        self.iteration = 1
        # Time offset C: the clock starts at ceil(C)+1 so that the
        # confidence bounds hold with probability 1-delta.
        self.C = (((4*self.alpha-1)*(self.nArms**2)) /
                  ((2*self.alpha-1)*self.delta))**(1/(2*self.alpha-1))
        self.t = ceil(self.C)+1
        self.chatty = False
        if run_count == "":
            self.runMessage = ""
        else:
            self.runMessage = "Run %s: " % str(run_count)
        old_output_dir = args["old_output_dir"]
        old_output_prefix = args["old_output_prefix"]
        # Resume branch: reload the statistics, arm groups, clock and
        # iteration counter from an earlier run's .npz snapshot,
        # remapping indices between the saved and the current ordering.
        if args["continue_sampling_experiment"] == "Yes" and \
                        old_output_dir != "" and old_output_prefix != "":
            old_file = os.path.join(old_output_dir, "%s-%d.npz" \
                                        % (old_output_prefix,int(run_count)))
            data = load(old_file)
            # Stagger concurrent runs reading from the same place.
            time.sleep(int(run_count))
            self.t = data['time']+ceil(self.C)+1
            # print "[self.lArms.index(a) for a in self.initArms] = ", [self.lArms.index(a) for a in self.initArms]
            Inds = [self.initArms.index(a) for a in self.lArms]
            # print "[self.initArms.index(a) for a in self.lArms] = ", Inds
            self.RealWins = data['RealWins'][ix_(Inds,Inds)]
            self.numPlays = self.RealWins + self.RealWins.T
            self.PMat = self.RealWins / self.numPlays
            self.invSqrtNumPlays = 1./sqrt(self.numPlays)
            # print "data['armGroups'] = ", data['armGroups']
            # print "data['RealWins'] = \n", data['RealWins']
            self.tArms.armGroups = [[self.lArms.index(self.initArms[a])
                                     for a in ag]
                                    for ag in data['armGroups'].tolist()]
            # print self.tArms.armGroups
            self.iteration = int(data['iteration'])
            self.currentBatch = int(self.t-ceil(self.C)) % len(self.tArms)
            data.close()
            logging.info("Done reading "+old_file)
        # print "RealWins = \n", self.RealWins
    def getUCB(self):
        """Recompute the UCB matrix for the current batch, first skipping
        ahead past batches already pruned down to a single arm."""
        Inds = self.tArms[self.currentBatch]
        while len(Inds) <= 1 and len(self.tArms) > 1:
            self.currentBatch = (self.currentBatch+1) % len(self.tArms)
            Inds = self.tArms[self.currentBatch]
        self.UCB = self.PMat[ix_(Inds,Inds)] + \
            sqrt(self.alpha*log(self.t)) * self.invSqrtNumPlays[ix_(Inds,Inds)]
        fill_diagonal(self.UCB,.5)
    def sampleTournament(self,withFig=False):
        """Prune the current batch until stable, then pick a champion
        uniformly at random from its survivors."""
        self.getUCB()
        while self.tArms.pruneGroup(self.currentBatch, self.UCB):
            self.getUCB()
        arms = self.tArms[self.currentBatch]
        champ = arms[randint(0,len(arms)-1)]
        return champ
    def doUCBRelChamp(self,champ,withFig=False):
        """Pick the challenger: the arm in the current batch with the
        highest UCB against the champion (ties broken at random)."""
        champInd = self.tArms[self.currentBatch].index(champ)
        ucb = self.UCB[:,champInd]
        ucb[champInd] = 0  # the champion cannot challenge itself
        challengerInd = myArgmax(ucb)
        challenger = self.tArms[self.currentBatch][challengerInd]
        return challenger
    def get_arms(self,withFig=False):
        # This returns two arms to compare.
        """Return ``(arm1, arm2, index1, index2)``: the champion and
        challenger plus their indices in the original arm ordering."""
        firstPlace = self.sampleTournament(withFig)
        secondPlace = self.doUCBRelChamp(firstPlace)
        r1 = self.lArms[firstPlace]
        r2 = self.lArms[secondPlace]
        i1 = self.initArms.index(r1)
        i2 = self.initArms.index(r2)
        return r1, r2, i1, i2
    def update_scores(self,r_winner,r_loser):
        # This method can be used to update the scores.
        """Record that ``r_winner`` beat ``r_loser``: periodically log the
        surviving groups, merge batches once enough arms are eliminated,
        update the pairwise statistics, and advance to the next batch.
        Returns the winner's index in the original arm ordering."""
        winner = self.dictArms[r_winner]
        loser = self.dictArms[r_loser]
        if (self.t - ceil(self.C)) % 1000 == 0.:
            ArmGroups = [sorted([self.initArms.index(self.lArms[i]) \
                                 for i in ag]) \
                         for ag in self.tArms.armGroups]
            logging.info("%s%d- Number of surviving arms: %d "\
                         % (self.runMessage, self.t - ceil(self.C),
                            sum([len(ag) for ag in ArmGroups]))+\
                         "Surviving groups of arms: %s " \
                         % ArmGroups)
            # W/UCB/Inds below are only consumed by the commented-out
            # diagnostic print that follows.
            W = self.RealWins
            UCB = W/(W+W.T) + sqrt(self.alpha*log(self.t)/(W+W.T))
            Inds = [self.lArms.index(a) for a in self.initArms]
            # print "AG = ", self.tArms.armGroups, "\n W = \n", W[ix_(Inds,Inds)], "\n(alpha,t) = ", (self.alpha,self.t) , "\n UCB = \n", UCB[ix_(Inds,Inds)] * (W[ix_(Inds,Inds)] > 1)
        # Merge once at most nArms/2^iteration (+1) arms survive.
        if self.tArms.numArms() <= self.nArms/(2**self.iteration)+1 \
                and len(self.tArms) > 1:
            self.tArms.mergeGroups()
            if min([len(a) for a in self.tArms.armGroups])<=0.5*self.batchSize:
                self.tArms.mergeGroups()
            self.iteration = self.iteration + 1
            logging.info("%s%d- Iteration %d" \
                         % (self.runMessage, self.t - ceil(self.C),
                            self.iteration))
        # Symmetric bookkeeping: the winner's win count goes up, both
        # directions' play counts and cached derived matrices refresh.
        self.RealWins[winner,loser] += 1
        self.numPlays[winner,loser] += 1
        self.invSqrtNumPlays[winner,loser] = \
            1./sqrt(self.numPlays[winner,loser])
        self.PMat[winner,loser] = \
            self.RealWins[winner,loser]/self.numPlays[winner,loser]
        self.numPlays[loser,winner] += 1
        self.invSqrtNumPlays[loser,winner] = \
            1./sqrt(self.numPlays[loser,winner])
        self.PMat[loser,winner] = \
            self.RealWins[loser,winner]/self.numPlays[loser,winner]
        self.currentBatch = (self.currentBatch+1) % len(self.tArms)
        self.t = self.t + 1
        return self.initArms.index(r_winner)
    def get_winner(self):
        # This method can be called to find out which arm is the best so far.
        """Best-guess winner: the arm whose empirical win probability
        exceeds 0.5 against the largest number of opponents.
        NOTE(review): overwrites ``self.numPlays`` with counts derived
        from ``RealWins`` as a side effect."""
        self.numPlays = self.RealWins+self.RealWins.T
        PMat = self.RealWins / self.numPlays
        self.champ = myArgmax((PMat > 0.5).sum(axis=1))
        logging.info("mergeRUCBSampler.get_winner() was called!")
        return self.lArms[self.champ]
#################### OLD CODE ########################
###### V3:
# def isReady(self,UCB,width):
# sideUCB = sign(UCB+eye(UCB.shape[0])-0.5)
# LCB = 1-UCB.T
# sideLCB = sign(LCB+eye(UCB.shape[0])-0.5)
# isClear = ((sideUCB * sideLCB) > 0).all(axis=1).any()
# isClear = isClear & (UCB-LCB < width).all()
# return isClear
#
# def readyBatches(self,UCB,width):
# ready = zeros(len(self.armGroups))
# for ind in range(len(self.armGroups)):
# group = self.armGroups[ind]
# ucb = UCB[ix_(group,group)]
# ready[ind] = self.isReady(ucb,width)
# return ready
#
# def getLosers(self,UCB):
# Losers = []
# for ag in self.armGroups:
# ucb = UCB[ix_(ag,ag)]
# losers = [ag[ind] for ind in nonzero((ucb < 0.5).any(axis=1))[0]]
# Losers = Losers + losers
# return Losers
# def getFullUCB(self):
# rWins = self.RealWins
# A = rWins
# B = rWins.T
# N = maximum(A+B,ones(A.shape))
# UCB = A/N + sqrt(self.alpha*log(self.t)/N)
# fill_diagonal(UCB,.5)
# return UCB
#
# def sampleTournament(self,withFig=False):
# self.getUCB()
# wins = (self.UCB >= .5).sum(axis=1)
# champInd = myArgmax(potentialChamps)
# champ = self.tArms[self.currentBatch][champInd]
# return champ # * UCB.max(axis=1))
#
#
# def update_scores(self,r_winner,r_loser):
# # This method can be used to update the scores.
# winner = self.dictArms[r_winner]
# loser = self.dictArms[r_loser]
# self.RealWins[winner,loser] = self.RealWins[winner,loser] + 1
# if self.t % max(self.nArms,1000) == 0:
# fullUCB = self.getFullUCB()
# Losers = self.tArms.getLosers(fullUCB)
# lWinners = [self.lArms[ind] for ind in set(self.iArms)-set(Losers)]
# logging.info("%s%d- Number of surviving arms: %d "\
# % (self.runMessage, self.t,
# self.nArms - len(Losers))+\
# "Surviving arms: %s" \
# % sorted([self.initArms.index(a) for a in lWinners]))
# readyBatches = self.tArms.readyBatches(fullUCB,self.width1)
# if readyBatches.sum() > 0.75*len(self.tArms):
# self.tArms.mergeGroups()
# self.iteration = self.iteration + 1
# logging.info("%s%d- Iteration %d" \
# % (self.runMessage, self.t, self.iteration))
# self.RealWins[winner,loser] = self.RealWins[winner,loser] + 1
# self.numPlays[winner,loser] = self.numPlays[winner,loser] + 1
# self.invSqrtNumPlays[winner,loser] = \
# 1./sqrt(self.numPlays[winner,loser])
# self.PMat[winner,loser] = \
# self.RealWins[winner,loser]/self.numPlays[winner,loser]
# self.numPlays[loser,winner] = self.numPlays[loser,winner] + 1
# self.invSqrtNumPlays[loser,winner] = \
# 1./sqrt(self.numPlays[loser,winner])
# self.PMat[loser,winner] = \
# self.RealWins[loser,winner]/self.numPlays[loser,winner]
# self.currentBatch = (self.currentBatch+1) % len(self.tArms)
# self.t = self.t + 1
# return self.initArms.index(r_winner)
###### V2: would skip over batches that had a clear winner
# def getUCB(self):
# keepLooking = True
# while keepLooking:
# Inds = self.tArms[self.currentBatch % len(self.tArms)]
# tempUCB = self.PMat[ix_(Inds,Inds)] + \
# sqrt(self.alpha*log(self.t)) * self.invSqrtNumPlays[ix_(Inds,Inds)]
# fill_diagonal(tempUCB,.5)
# keepLooking = self.tArms.isReady(tempUCB,self.width2)
# self.currentBatch = (self.currentBatch+1) % len(self.tArms)
# self.currentBatch = (self.currentBatch-1) % len(self.tArms)
# self.UCB = tempUCB
###### V1: merging would happen when enough arms were defeated. ##########
# def mergeGroups(self):
# oldAG = self.armGroups[:]
# self.armGroups = []
# for i in range(len(oldAG)/2):
# self.armGroups.append(oldAG[2*i]+oldAG[2*i+1])
# if mod(len(oldAG),2) == 1:
# self.armGroups.append(oldAG[-1])
#
#
# def getUCB(self):
# Inds = self.tArms[self.currentBatch % len(self.tArms)]
# self.UCB = self.PMat[ix_(Inds,Inds)] + \
# sqrt(self.alpha*log(self.t)) * self.invSqrtNumPlays[ix_(Inds,Inds)]
# fill_diagonal(self.UCB,.5)
#
#
# def getFullUCB(self):
# rWins = self.RealWins
# A = rWins
# B = rWins.T
# N = maximum(A+B,ones(A.shape))
# UCB = A/N + sqrt(self.alpha*log(self.t)/N)
# fill_diagonal(UCB,.5)
# return UCB
#
# def sampleTournament(self,withFig=False):
# self.getUCB()
# potentialChamps = (self.UCB >= .5).all(axis=1)
# champInd = myArgmax(potentialChamps)
# champ = self.tArms[self.currentBatch][champInd]
# return champ # * UCB.max(axis=1))
#
#
# def doUCBRelChamp(self,champ,withFig=False):
# champInd = self.tArms[self.currentBatch].index(champ)
# ucb = self.UCB[:,champInd]
# if len(self.tArms) > 1:
# ucb[champInd] = 0
# challengerInd = myArgmax(ucb)
# challenger = self.tArms[self.currentBatch][challengerInd]
# return challenger
#
#
# def get_arms(self,withFig=False):
# # This returns two arms to compare.
# firstPlace = self.sampleTournament(withFig)
# secondPlace = self.doUCBRelChamp(firstPlace)
# r1 = self.lArms[firstPlace]
# r2 = self.lArms[secondPlace]
# i1 = self.initArms.index(r1)
# i2 = self.initArms.index(r2)
# return r1, r2, i1, i2
#
#
# def update_scores(self,r_winner,r_loser):
# # This method can be used to update the scores.
# winner = self.dictArms[r_winner]
# loser = self.dictArms[r_loser]
# self.RealWins[winner,loser] = self.RealWins[winner,loser] + 1
# if self.t % max(self.nArms,1000) == 0:
# Losers = self.tArms.getLosers(self.getFullUCB())
# lWinners = [self.lArms[ind] for ind in set(self.iArms)-set(Losers)]
# logging.info("%s%d- Number of surviving arms: %d "\
# % (self.runMessage, self.t,
# self.nArms - len(Losers))+\
# "Surviving arms: %s" \
# % sorted([self.initArms.index(a) for a in lWinners]))
# nPotentialChamps = self.nArms - len(Losers)
# if nPotentialChamps < 1.5*len(self.tArms):
# self.tArms.mergeGroups()
# self.iteration = self.iteration + 1
# logging.info("%s%d- Iteration %d" \
# % (self.runMessage, self.t, self.iteration))
# self.currentBatch = (self.currentBatch+1) % len(self.tArms)
# self.RealWins[winner,loser] = self.RealWins[winner,loser] + 1
# self.numPlays[winner,loser] = self.numPlays[winner,loser] + 1
# self.invSqrtNumPlays[winner,loser] = \
# 1./sqrt(self.numPlays[winner,loser])
# self.PMat[winner,loser] = \
# self.RealWins[winner,loser]/self.numPlays[winner,loser]
# self.numPlays[loser,winner] = self.numPlays[loser,winner] + 1
# self.invSqrtNumPlays[loser,winner] = \
# 1./sqrt(self.numPlays[loser,winner])
# self.PMat[loser,winner] = \
# self.RealWins[loser,winner]/self.numPlays[loser,winner]
# self.t = self.t + 1
# return self.initArms.index(r_winner)
| 0.007047 |
# mysql/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database.
Supported Versions and Features
-------------------------------
SQLAlchemy supports 6 major MySQL versions: 3.23, 4.0, 4.1, 5.0, 5.1 and 6.0,
with capabilities increasing with more modern servers.
Versions 4.1 and higher support the basic SQL functionality that SQLAlchemy
uses in the ORM and SQL expressions. These versions pass the applicable tests
in the suite 100%. No heroic measures are taken to work around major missing
SQL features- if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
Most available DBAPI drivers are supported; see below.
===================================== ===============
Feature Minimum Version
===================================== ===============
sqlalchemy.orm 4.1.1
Table Reflection 3.23.x
DDL Generation 4.1.1
utf8/Full Unicode Connections 4.1.1
Transactions 3.23.15
Two-Phase Transactions 5.0.3
Nested Transactions 5.0.3
===================================== ===============
See the official MySQL documentation for detailed information about features
supported in any given server release.
Connecting
----------
See the API documentation on individual drivers for details on connecting.
Connection Timeouts
-------------------
MySQL features an automatic connection close behavior for connections that have
been idle for eight hours or more. To avoid this issue, use the
``pool_recycle`` option, which controls the maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
.. _mysql_storage_engines:
Storage Engines
---------------
Most MySQL server installations have a default table type of ``MyISAM``, a
non-transactional table type. During a transaction, non-transactional storage
engines do not participate and continue to store table changes in autocommit
mode. For fully atomic transactions as well as support for foreign key
constraints, all participating tables must use a
transactional engine such as ``InnoDB``, ``Falcon``, ``SolidDB``, `PBXT`, etc.
Storage engines can be elected when creating tables in SQLAlchemy by supplying
a ``mysql_engine='whatever'`` to the ``Table`` constructor. Any MySQL table
creation option can be specified in this syntax::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
.. seealso::
`The InnoDB Storage Engine <http://dev.mysql.com/doc/refman/5.0/en/innodb-storage-engine.html>`_ - on the MySQL website.
Case Sensitivity and Table Reflection
-------------------------------------
MySQL has inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL
database itself, especially if database reflection features are
to be used.
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level``
parameter which results in the command ``SET SESSION
TRANSACTION ISOLATION LEVEL <level>`` being invoked for
every new connection. Valid values for this parameter are
``READ COMMITTED``, ``READ UNCOMMITTED``,
``REPEATABLE READ``, and ``SERIALIZABLE``::
engine = create_engine(
"mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
.. versionadded:: 0.7.6
Keys
----
Not all MySQL storage engines support foreign keys. For ``MyISAM`` and
similar engines, the information loaded by table reflection will not include
foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
an integer primary key column::
>>> t = Table('mytable', metadata,
  ...   Column('id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (id)
)
You can disable this behavior by supplying ``autoincrement=False`` to the
:class:`~sqlalchemy.Column`. This flag can also be used to enable
auto-increment on a secondary column in a multi-column key for some storage
engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
SQL Mode
--------
MySQL SQL modes are supported. Modes that enable ``ANSI_QUOTES`` (such as
``ANSI``) require an engine option to modify SQLAlchemy's quoting style.
When using an ANSI-quoting mode, supply ``use_ansiquotes=True`` when
creating your ``Engine``::
create_engine('mysql://localhost/test', use_ansiquotes=True)
This is an engine-wide option and is not toggleable on a per-connection basis.
SQLAlchemy does not presume to ``SET sql_mode`` for you with this option. For
the best performance, set the quoting style server-wide in ``my.cnf`` or by
supplying ``--sql-mode`` to ``mysqld``. You can also use a
:class:`sqlalchemy.pool.Pool` listener hook to issue a ``SET SESSION
sql_mode='...'`` on connect to configure each connection.
If you do not specify ``use_ansiquotes``, the regular MySQL quoting style is
used by default.
If you do issue a ``SET sql_mode`` through SQLAlchemy, the dialect must be
updated if the quoting style is changed. Again, this change will affect all
connections::
connection.execute('SET sql_mode="ansi"')
connection.dialect.use_ansiquotes = True
MySQL SQL Extensions
--------------------
Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid MySQL statement can be executed as a string as well.
Some limited direct support for MySQL extensions to SQL is currently
available.
* SELECT pragma::
select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10)
rowcount Support
----------------
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
or whatever is equivalent for the DBAPI in use, on connect, unless the flag value
is overridden using DBAPI-specific options
(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
OurSQL driver).
See also:
:attr:`.ResultProxy.rowcount`
CAST Support
------------
MySQL documents the CAST operator as available in version 4.0.2. When using the
SQLAlchemy :func:`.cast` function, SQLAlchemy
will not render the CAST token on MySQL before this version, based on server version
detection, instead rendering the internal expression directly.
CAST may still not be desirable on an early MySQL version post-4.0.2, as it didn't
add all datatype support until 4.1.1. If your application falls into this
narrow area, the behavior of CAST can be controlled using the :ref:`sqlalchemy.ext.compiler_toplevel`
system, as per the recipe below::
from sqlalchemy.sql.expression import _Cast
from sqlalchemy.ext.compiler import compiles
@compiles(_Cast, 'mysql')
def _check_mysql_version(element, compiler, **kw):
if compiler.dialect.server_version_info < (4, 1, 0):
return compiler.process(element.clause, **kw)
else:
return compiler.visit_cast(element, **kw)
The above function, which only needs to be declared once
within an application, overrides the compilation of the
:func:`.cast` construct to check for version 4.1.0 before
fully rendering CAST; else the internal element of the
construct is rendered directly.
.. _mysql_indexes:
MySQL Specific Index Options
----------------------------
MySQL-specific extensions to the :class:`.Index` construct are available.
Index Length
~~~~~~~~~~~~~
MySQL provides an option to create index entries with a certain length, where
"length" refers to the number of characters or bytes in each value which will
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` parameter::
Index('my_index', my_table.c.data, mysql_length=10)
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument will be
simply passed through to the underlying CREATE INDEX command, so it *must* be
an integer. MySQL only allows a length for an index if it is for a CHAR,
VARCHAR, TEXT, BINARY, VARBINARY and BLOB.
Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash')
As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
http://dev.mysql.com/doc/refman/5.0/en/create-index.html
http://dev.mysql.com/doc/refman/5.0/en/create-table.html
"""
import datetime, inspect, re, sys
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, log, sql, util
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy.sql import functions as sql_functions
from sqlalchemy.sql import compiler
from array import array as _array
from sqlalchemy.engine import reflection
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy import types as sqltypes
from sqlalchemy.util import topological
from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \
BLOB, BINARY, VARBINARY
# MySQL keywords that must be quoted when used as identifiers.  The
# trailing comments record the server version that introduced each group
# of words (duplicates across groups are harmless in a set).
RESERVED_WORDS = set(
    ['accessible', 'add', 'all', 'alter', 'analyze','and', 'as', 'asc',
     'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
     'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check',
     'collate', 'column', 'condition', 'constraint', 'continue', 'convert',
     'create', 'cross', 'current_date', 'current_time', 'current_timestamp',
     'current_user', 'cursor', 'database', 'databases', 'day_hour',
     'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal',
     'declare', 'default', 'delayed', 'delete', 'desc', 'describe',
     'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
     'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
     'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
     'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having',
     'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if',
     'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive',
     'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer',
     'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill',
     'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load',
     'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext',
     'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match',
     'mediumblob', 'mediumint', 'mediumtext', 'middleint',
     'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
     'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
     'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
     'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads',
     'read_only', 'read_write', 'real', 'references', 'regexp', 'release',
     'rename', 'repeat', 'replace', 'require', 'restrict', 'return',
     'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond',
     'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial',
     'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning',
     'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl',
     'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob',
     'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo',
     'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use',
     'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary',
     'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with',
     'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0
     'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1
     'accessible', 'linear', 'master_ssl_verify_server_cert', 'range',
     'read_only', 'read_write', # 5.1
    ])

# Statements that modify data or schema and therefore should trigger an
# autocommit when no explicit transaction is in progress.
AUTOCOMMIT_RE = re.compile(
    r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)',
    re.I | re.UNICODE)
# Matches SET statements, optionally scoped with GLOBAL or SESSION.
SET_RE = re.compile(
    r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w',
    re.I | re.UNICODE)
class _NumericType(object):
"""Base for MySQL numeric types."""
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
super(_NumericType, self).__init__(**kw)
class _FloatType(_NumericType, sqltypes.Float):
    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Shared constructor for the MySQL floating point types.

        For REAL and DOUBLE, MySQL requires that precision and scale be
        supplied together or not at all, so a lone value of either one
        is rejected.
        """
        if isinstance(self, (REAL, DOUBLE)):
            # Exactly one of the two was given -> invalid combination.
            if (precision is None) != (scale is None):
                raise exc.ArgumentError(
                    "You must specify both precision and scale or omit "
                    "both altogether.")
        super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw)
        self.scale = scale
class _IntegerType(_NumericType, sqltypes.Integer):
    """Base for MySQL integer types, adding an optional display width
    (e.g. ``INT(11)``) on top of the shared numeric options."""
    def __init__(self, display_width=None, **kw):
        self.display_width = display_width
        super(_IntegerType, self).__init__(**kw)
class _StringType(sqltypes.String):
    """Base for MySQL string types, adding MySQL-specific character set,
    collation, and storage modifiers."""
    def __init__(self, charset=None, collation=None,
                 ascii=False, binary=False,
                 national=False, **kw):
        # Per-column character set; collation accepts either spelling.
        self.charset = charset
        # allow collate= or collation=
        self.collation = kw.pop('collate', collation)
        self.ascii = ascii
        # We have to munge the 'unicode' param strictly as a dict
        # otherwise 2to3 will turn it into str.
        self.__dict__['unicode'] = kw.get('unicode', False)
        # sqltypes.String does not accept the 'unicode' arg at all.
        if 'unicode' in kw:
            del kw['unicode']
        self.binary = binary
        self.national = national
        super(_StringType, self).__init__(**kw)
    def __repr__(self):
        # Build a constructor-style repr from the __init__ signatures of
        # the concrete subclass and of this base class, showing only the
        # arguments that carry a meaningful (non-None, non-False) value.
        attributes = inspect.getargspec(self.__init__)[0][1:]
        attributes.extend(inspect.getargspec(_StringType.__init__)[0][1:])
        params = {}
        for attr in attributes:
            val = getattr(self, attr)
            if val is not None and val is not False:
                params[attr] = val
        return "%s(%s)" % (self.__class__.__name__,
                           ', '.join(['%s=%r' % (k, params[k]) for k in params]))
class NUMERIC(_NumericType, sqltypes.NUMERIC):
    """MySQL NUMERIC type."""
    __visit_name__ = 'NUMERIC'
    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a NUMERIC.
        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.
        :param scale: The number of digits after the decimal point.
        :param unsigned: a boolean, optional.
        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.
        """
        super(NUMERIC, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
    """MySQL DECIMAL type."""
    __visit_name__ = 'DECIMAL'
    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DECIMAL.
        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.
        :param scale: The number of digits after the decimal point.
        :param unsigned: a boolean, optional.
        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.
        """
        super(DECIMAL, self).__init__(precision=precision, scale=scale,
                                      asdecimal=asdecimal, **kw)
class DOUBLE(_FloatType):
    """MySQL DOUBLE type."""
    __visit_name__ = 'DOUBLE'
    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DOUBLE.
        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.
        :param scale: The number of digits after the decimal point.
        :param unsigned: a boolean, optional.
        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.
        """
        super(DOUBLE, self).__init__(precision=precision, scale=scale,
                                     asdecimal=asdecimal, **kw)
class REAL(_FloatType, sqltypes.REAL):
    """MySQL REAL type."""
    __visit_name__ = 'REAL'
    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a REAL.
        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.
        :param scale: The number of digits after the decimal point.
        :param unsigned: a boolean, optional.
        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.
        """
        super(REAL, self).__init__(precision=precision, scale=scale,
                                   asdecimal=asdecimal, **kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
    """MySQL FLOAT type."""
    __visit_name__ = 'FLOAT'
    def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
        """Construct a FLOAT.
        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.
        :param scale: The number of digits after the decimal point.
        :param unsigned: a boolean, optional.
        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.
        """
        super(FLOAT, self).__init__(precision=precision, scale=scale,
                                    asdecimal=asdecimal, **kw)
    def bind_processor(self, dialect):
        # Returning None applies no bind-parameter conversion; values
        # are passed to the DBAPI driver as-is.
        return None
class INTEGER(_IntegerType, sqltypes.INTEGER):
    """MySQL INTEGER type."""
    __visit_name__ = 'INTEGER'
    def __init__(self, display_width=None, **kw):
        """Construct an INTEGER.
        :param display_width: Optional, maximum display width for this number.
        :param unsigned: a boolean, optional.
        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.
        """
        super(INTEGER, self).__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
    """MySQL BIGINTEGER type."""
    __visit_name__ = 'BIGINT'
    def __init__(self, display_width=None, **kw):
        """Construct a BIGINTEGER.
        :param display_width: Optional, maximum display width for this number.
        :param unsigned: a boolean, optional.
        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.
        """
        super(BIGINT, self).__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
    """MySQL MEDIUMINTEGER type."""

    __visit_name__ = 'MEDIUMINT'

    def __init__(self, display_width=None, **kw):
        """Create a MEDIUMINTEGER type.

        :param display_width: optional maximum display width for this
          number.

        :param unsigned: optional boolean; render the column UNSIGNED.

        :param zerofill: optional; when true the server stores values as
          zero-left-padded strings for display purposes.  Values returned
          by the underlying database API remain numeric.

        """
        super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
    """MySQL TINYINT type."""

    __visit_name__ = 'TINYINT'

    def __init__(self, display_width=None, **kw):
        """Create a TINYINT type.

        Note: following the usual MySQL conventions, TINYINT(1) columns
        reflected during Table(..., autoload=True) are treated as
        Boolean columns.

        :param display_width: optional maximum display width for this
          number.

        :param unsigned: optional boolean; render the column UNSIGNED.

        :param zerofill: optional; when true the server stores values as
          zero-left-padded strings for display purposes.  Values returned
          by the underlying database API remain numeric.

        """
        super(TINYINT, self).__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
    """MySQL SMALLINTEGER type."""

    __visit_name__ = 'SMALLINT'

    def __init__(self, display_width=None, **kw):
        """Create a SMALLINTEGER type.

        :param display_width: optional maximum display width for this
          number.

        :param unsigned: optional boolean; render the column UNSIGNED.

        :param zerofill: optional; when true the server stores values as
          zero-left-padded strings for display purposes.  Values returned
          by the underlying database API remain numeric.

        """
        super(SMALLINT, self).__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
    """MySQL BIT type.

    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
    for MyISAM, MEMORY, InnoDB and BDB.  For older versions, use a
    MSTinyInteger() type.

    """
    __visit_name__ = 'BIT'

    def __init__(self, length=None):
        """Construct a BIT.

        :param length: Optional, number of bits.

        """
        self.length = length

    def result_processor(self, dialect, coltype):
        """Convert a MySQL's 64 bit, variable length binary string to a long.

        TODO: this is MySQL-db, pyodbc specific.  OurSQL and mysqlconnector
        already do this, so this logic should be moved to those dialects.

        """
        def process(value):
            if value is not None:
                # Fold the big-endian byte string into an integer.  A plain
                # ``0`` literal suffices: Python 2 promotes int to long
                # automatically on overflow, so the previous ``0L`` literal
                # was unnecessary (and is invalid syntax on Python 3).
                v = 0
                for i in map(ord, value):
                    v = v << 8 | i
                return v
            return value
        return process
class _MSTime(sqltypes.Time):
    """MySQL TIME type."""

    __visit_name__ = 'TIME'

    def result_processor(self, dialect, coltype):
        # MySQL drivers hand TIME values back as timedeltas; convert to
        # datetime.time.
        time = datetime.time

        def process(value):
            # convert from a timedelta value
            if value is not None:
                # NOTE: relies on Python 2 floor semantics of ``/`` between
                # ints for the hour/minute arithmetic below.
                seconds = value.seconds
                minutes = seconds / 60
                return time(minutes / 60, minutes % 60, seconds - minutes * 60)
            else:
                return None
        return process
class TIMESTAMP(sqltypes.TIMESTAMP):
    """MySQL TIMESTAMP type."""

    # NULL/NOT NULL rendering for TIMESTAMP columns is special-cased in
    # MySQLDDLCompiler.get_column_specification.
    __visit_name__ = 'TIMESTAMP'
class YEAR(sqltypes.TypeEngine):
    """MySQL YEAR type, for single byte storage of years 1901-2155."""

    __visit_name__ = 'YEAR'

    def __init__(self, display_width=None):
        # Optional display width, rendered as e.g. YEAR(4) in DDL.
        self.display_width = display_width
class TEXT(_StringType, sqltypes.TEXT):
    """MySQL TEXT type, for text up to 2^16 characters."""

    __visit_name__ = 'TEXT'

    def __init__(self, length=None, **kw):
        """Construct a TEXT.

        :param length: optional; when provided, the server may optimize
          storage by substituting the smallest TEXT variant sufficient to
          hold ``length`` characters.

        Also accepts the standard MySQL string options: ``charset`` and
        ``collation`` give an explicit character set / collation and take
        precedence over the short-hands; ``ascii`` / ``unicode`` are
        short-hands for the ``latin1`` / ``ucs2`` character sets;
        ``national`` uses the server's configured national character set;
        ``binary`` picks the binary collation matching the column's
        character set (this affects collation only, not the stored data).

        """
        super(TEXT, self).__init__(length=length, **kw)
class TINYTEXT(_StringType):
    """MySQL TINYTEXT type, for text up to 2^8 characters."""

    __visit_name__ = 'TINYTEXT'

    def __init__(self, **kwargs):
        """Construct a TINYTEXT.

        Accepts the standard MySQL string options: ``charset`` and
        ``collation`` give an explicit character set / collation and take
        precedence over the short-hands; ``ascii`` / ``unicode`` are
        short-hands for the ``latin1`` / ``ucs2`` character sets;
        ``national`` uses the server's configured national character set;
        ``binary`` picks the binary collation matching the column's
        character set (this affects collation only, not the stored data).

        """
        super(TINYTEXT, self).__init__(**kwargs)
class MEDIUMTEXT(_StringType):
    """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""

    __visit_name__ = 'MEDIUMTEXT'

    def __init__(self, **kwargs):
        """Construct a MEDIUMTEXT.

        Accepts the standard MySQL string options: ``charset`` and
        ``collation`` give an explicit character set / collation and take
        precedence over the short-hands; ``ascii`` / ``unicode`` are
        short-hands for the ``latin1`` / ``ucs2`` character sets;
        ``national`` uses the server's configured national character set;
        ``binary`` picks the binary collation matching the column's
        character set (this affects collation only, not the stored data).

        """
        super(MEDIUMTEXT, self).__init__(**kwargs)
class LONGTEXT(_StringType):
    """MySQL LONGTEXT type, for text up to 2^32 characters."""

    __visit_name__ = 'LONGTEXT'

    def __init__(self, **kwargs):
        """Construct a LONGTEXT.

        Accepts the standard MySQL string options: ``charset`` and
        ``collation`` give an explicit character set / collation and take
        precedence over the short-hands; ``ascii`` / ``unicode`` are
        short-hands for the ``latin1`` / ``ucs2`` character sets;
        ``national`` uses the server's configured national character set;
        ``binary`` picks the binary collation matching the column's
        character set (this affects collation only, not the stored data).

        """
        super(LONGTEXT, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
    """MySQL VARCHAR type, for variable-length character data."""

    __visit_name__ = 'VARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a VARCHAR.

        :param length: maximum data length, in characters.

        Also accepts the standard MySQL string options: ``charset`` and
        ``collation`` give an explicit character set / collation and take
        precedence over the short-hands; ``ascii`` / ``unicode`` are
        short-hands for the ``latin1`` / ``ucs2`` character sets;
        ``national`` uses the server's configured national character set;
        ``binary`` picks the binary collation matching the column's
        character set (this affects collation only, not the stored data).

        """
        super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
    """MySQL CHAR type, for fixed-length character data."""

    __visit_name__ = 'CHAR'

    def __init__(self, length=None, **kwargs):
        """Construct a CHAR.

        :param length: maximum data length, in characters.

        :param binary: optional; use the default binary collation of the
          national character set.  Affects collation only -- for binary
          data use a BINARY type instead.

        :param collation: optional explicit collation; must be compatible
          with the national character set.

        """
        super(CHAR, self).__init__(length=length, **kwargs)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
    """MySQL NVARCHAR type.

    For variable-length character data in the server's configured national
    character set.
    """

    __visit_name__ = 'NVARCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct an NVARCHAR.

        :param length: maximum data length, in characters.

        :param binary: optional; use the default binary collation of the
          national character set.  Affects collation only -- for binary
          data use a BINARY type instead.

        :param collation: optional explicit collation; must be compatible
          with the national character set.

        """
        # NVARCHAR is VARCHAR rendered in the national character set.
        kwargs['national'] = True
        super(NVARCHAR, self).__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
    """MySQL NCHAR type.

    For fixed-length character data in the server's configured national
    character set.
    """

    __visit_name__ = 'NCHAR'

    def __init__(self, length=None, **kwargs):
        """Construct an NCHAR.

        :param length: maximum data length, in characters.

        :param binary: optional; use the default binary collation of the
          national character set.  Affects collation only -- for binary
          data use a BINARY type instead.

        :param collation: optional explicit collation; must be compatible
          with the national character set.

        """
        # NCHAR is CHAR rendered in the national character set.
        kwargs['national'] = True
        super(NCHAR, self).__init__(length=length, **kwargs)
# The BLOB variants differ only in their maximum stored data length.
class TINYBLOB(sqltypes._Binary):
    """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""

    __visit_name__ = 'TINYBLOB'


class MEDIUMBLOB(sqltypes._Binary):
    """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""

    __visit_name__ = 'MEDIUMBLOB'


class LONGBLOB(sqltypes._Binary):
    """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""

    __visit_name__ = 'LONGBLOB'
class ENUM(sqltypes.Enum, _StringType):
    """MySQL ENUM type."""

    __visit_name__ = 'ENUM'

    def __init__(self, *enums, **kw):
        """Construct an ENUM.

        Example:

          Column('myenum', MSEnum("foo", "bar", "baz"))

        :param enums: The range of valid values for this ENUM.  Values will be
          quoted when generating the schema according to the quoting flag (see
          below).

        :param strict: Defaults to False: ensure that a given value is in this
          ENUM's range of permissible values when inserting or updating rows.
          Note that MySQL will not raise a fatal error if you attempt to store
          an out of range value- an alternate value will be stored instead.
          (See MySQL ENUM documentation.)

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence to 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence to 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        :param quoting: Defaults to 'auto': automatically determine enum value
          quoting.  If all enum values are surrounded by the same quoting
          character, then use 'quoted' mode.  Otherwise, use 'unquoted' mode.

          'quoted': values in enums are already quoted, they will be used
          directly when generating the schema - this usage is deprecated.

          'unquoted': values in enums are not quoted, they will be escaped and
          surrounded by single quotes when generating the schema.

          Previous versions of this type always required manually quoted
          values to be supplied; future versions will always quote the string
          literals for you.  This is a transitional option.

        """
        self.quoting = kw.pop('quoting', 'auto')

        if self.quoting == 'auto' and len(enums):
            # What quoting character are we using?
            q = None
            for e in enums:
                if len(e) == 0:
                    # An empty string cannot carry quotes; treat the whole
                    # set of values as unquoted.
                    self.quoting = 'unquoted'
                    break
                elif q is None:
                    q = e[0]

                if e[0] != q or e[-1] != q:
                    self.quoting = 'unquoted'
                    break
            else:
                # Every value begins and ends with the same quote character.
                self.quoting = 'quoted'

        if self.quoting == 'quoted':
            util.warn_deprecated(
                'Manually quoting ENUM value literals is deprecated. Supply '
                'unquoted values and use the quoting= option in cases of '
                'ambiguity.')
            enums = self._strip_enums(enums)

        self.strict = kw.pop('strict', False)

        # The string length of the type is the longest (unquoted) member.
        length = max([len(v) for v in enums] + [0])

        # Discard generic Enum() keyword arguments that are not used here;
        # the remaining kw is passed to _StringType.  Note sqltypes.Enum is
        # initialized with the member values only.
        kw.pop('metadata', None)
        kw.pop('schema', None)
        kw.pop('name', None)
        kw.pop('quote', None)
        kw.pop('native_enum', None)
        _StringType.__init__(self, length=length, **kw)
        sqltypes.Enum.__init__(self, *enums)

    @classmethod
    def _strip_enums(cls, enums):
        # Remove enclosing quote characters and collapse doubled interior
        # quotes in each value.
        strip_enums = []
        for a in enums:
            if a[0:1] == '"' or a[0:1] == "'":
                # strip enclosing quotes and unquote interior
                a = a[1:-1].replace(a[0] * 2, a[0])
            strip_enums.append(a)
        return strip_enums

    def bind_processor(self, dialect):
        super_convert = super(ENUM, self).bind_processor(dialect)

        def process(value):
            # With strict=True, reject values outside the declared members
            # before delegating to the parent processor (if any).
            if self.strict and value is not None and value not in self.enums:
                raise exc.InvalidRequestError('"%s" not a valid value for '
                                              'this enum' % value)
            if super_convert:
                return super_convert(value)
            else:
                return value
        return process

    def adapt(self, impltype, **kw):
        # Carry the 'strict' flag along when the type is adapted/copied.
        kw['strict'] = self.strict
        return sqltypes.Enum.adapt(self, impltype, **kw)
class SET(_StringType):
    """MySQL SET type."""

    __visit_name__ = 'SET'

    def __init__(self, *values, **kw):
        """Construct a SET.

        Example::

          Column('myset', MSSet("'foo'", "'bar'", "'baz'"))

        :param values: The range of valid values for this SET.  Values will be
          used exactly as they appear when generating schemas.  Strings must
          be quoted, as in the example above.  Single-quotes are suggested for
          ANSI compatibility and are required for portability to servers with
          ANSI_QUOTES enabled.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence to 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence to 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        # Keep the quoted originals for DDL generation; self.values holds
        # the unquoted member strings.
        self._ddl_values = values

        strip_values = []
        for a in values:
            if a[0:1] == '"' or a[0:1] == "'":
                # strip enclosing quotes and unquote interior
                a = a[1:-1].replace(a[0] * 2, a[0])
            strip_values.append(a)

        self.values = strip_values
        # Default the column length to the longest member.
        kw.setdefault('length', max([len(v) for v in strip_values] + [0]))
        super(SET, self).__init__(**kw)

    def result_processor(self, dialect, coltype):
        def process(value):
            # The good news:
            #   No ',' quoting issues- commas aren't allowed in SET values
            # The bad news:
            #   Plenty of driver inconsistencies here.
            if isinstance(value, util.set_types):
                # ..some versions convert '' to an empty set
                if not value:
                    value.add('')
                # ..some return sets.Set, even for pythons that have
                # __builtin__.set
                if not isinstance(value, set):
                    value = set(value)
                return value
            # ...and some versions return strings
            if value is not None:
                return set(value.split(','))
            else:
                return value
        return process

    def bind_processor(self, dialect):
        super_convert = super(SET, self).bind_processor(dialect)

        def process(value):
            # Scalars (including Python 2 long/basestring) pass through
            # untouched; any other collection is joined into MySQL's
            # comma-separated SET literal form.
            if value is None or isinstance(value, (int, long, basestring)):
                pass
            else:
                if None in value:
                    # Normalize a None member to the empty-string member.
                    value = set(value)
                    value.remove(None)
                    value.add('')
                value = ','.join(value)
            if super_convert:
                return super_convert(value)
            else:
                return value
        return process
# old names -- legacy aliases retained for backwards compatibility.
MSTime = _MSTime
MSSet = SET
MSEnum = ENUM
MSLongBlob = LONGBLOB
MSMediumBlob = MEDIUMBLOB
MSTinyBlob = TINYBLOB
MSBlob = BLOB
MSBinary = BINARY
MSVarBinary = VARBINARY
MSNChar = NCHAR
MSNVarChar = NVARCHAR
MSChar = CHAR
MSString = VARCHAR
MSLongText = LONGTEXT
MSMediumText = MEDIUMTEXT
MSTinyText = TINYTEXT
MSText = TEXT
MSYear = YEAR
MSTimeStamp = TIMESTAMP
MSBit = BIT
MSSmallInteger = SMALLINT
MSTinyInteger = TINYINT
MSMediumInteger = MEDIUMINT
MSBigInteger = BIGINT
MSNumeric = NUMERIC
MSDecimal = DECIMAL
MSDouble = DOUBLE
MSReal = REAL
MSFloat = FLOAT
MSInteger = INTEGER

# Adaptation of generic SQLAlchemy types to the MySQL-specific
# implementations defined above.
colspecs = {
    sqltypes.Numeric: NUMERIC,
    sqltypes.Float: FLOAT,
    sqltypes.Time: _MSTime,
    sqltypes.Enum: ENUM,
}

# Everything 3.23 through 5.1 excepting OpenGIS types.
# Keys are lower-case MySQL type names mapped to their type classes.
ischema_names = {
    'bigint': BIGINT,
    'binary': BINARY,
    'bit': BIT,
    'blob': BLOB,
    'boolean': BOOLEAN,
    'char': CHAR,
    'date': DATE,
    'datetime': DATETIME,
    'decimal': DECIMAL,
    'double': DOUBLE,
    'enum': ENUM,
    'fixed': DECIMAL,
    'float': FLOAT,
    'int': INTEGER,
    'integer': INTEGER,
    'longblob': LONGBLOB,
    'longtext': LONGTEXT,
    'mediumblob': MEDIUMBLOB,
    'mediumint': MEDIUMINT,
    'mediumtext': MEDIUMTEXT,
    'nchar': NCHAR,
    'nvarchar': NVARCHAR,
    'numeric': NUMERIC,
    'set': SET,
    'smallint': SMALLINT,
    'text': TEXT,
    'time': TIME,
    'timestamp': TIMESTAMP,
    'tinyblob': TINYBLOB,
    'tinyint': TINYINT,
    'tinytext': TINYTEXT,
    'varbinary': VARBINARY,
    'varchar': VARCHAR,
    'year': YEAR,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
    def should_autocommit_text(self, statement):
        # Autocommit any statement matched by the module-level
        # AUTOCOMMIT_RE pattern (defined elsewhere in this module).
        return AUTOCOMMIT_RE.match(statement)
class MySQLCompiler(compiler.SQLCompiler):
    """Statement compiler producing MySQL-flavored SQL."""

    render_table_with_column_in_update_from = True
    """Overridden from base SQLCompiler value"""

    extract_map = compiler.SQLCompiler.extract_map.copy()
    extract_map.update({
        'milliseconds': 'millisecond',
    })

    def visit_random_func(self, fn, **kw):
        # MySQL spells the random function "rand()".
        return "rand%s" % self.function_argspec(fn)

    def visit_utc_timestamp_func(self, fn, **kw):
        return "UTC_TIMESTAMP"

    def visit_sysdate_func(self, fn, **kw):
        return "SYSDATE()"

    def visit_concat_op(self, binary, **kw):
        # MySQL uses the concat() function rather than the || operator.
        return "concat(%s, %s)" % (self.process(binary.left), self.process(binary.right))

    def visit_match_op(self, binary, **kw):
        # Full-text search in boolean mode.
        return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % (self.process(binary.left), self.process(binary.right))

    def get_from_hint_text(self, table, text):
        return text

    def visit_typeclause(self, typeclause):
        # Map a SQLAlchemy type to the restricted set of target types that
        # MySQL's CAST() accepts; returns None when the type cannot appear
        # in a CAST (handled by visit_cast below).
        type_ = typeclause.type.dialect_impl(self.dialect)
        if isinstance(type_, sqltypes.Integer):
            if getattr(type_, 'unsigned', False):
                return 'UNSIGNED INTEGER'
            else:
                return 'SIGNED INTEGER'
        elif isinstance(type_, sqltypes.TIMESTAMP):
            return 'DATETIME'
        elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime, sqltypes.Date, sqltypes.Time)):
            return self.dialect.type_compiler.process(type_)
        elif isinstance(type_, sqltypes.Text):
            return 'CHAR'
        elif (isinstance(type_, sqltypes.String) and not
              isinstance(type_, (ENUM, SET))):
            if getattr(type_, 'length'):
                return 'CHAR(%s)' % type_.length
            else:
                return 'CHAR'
        elif isinstance(type_, sqltypes._Binary):
            return 'BINARY'
        elif isinstance(type_, sqltypes.NUMERIC):
            return self.dialect.type_compiler.process(type_).replace('NUMERIC', 'DECIMAL')
        else:
            return None

    def visit_cast(self, cast, **kwargs):
        # No cast until 4, no decimals until 5.
        if not self.dialect._supports_cast:
            return self.process(cast.clause.self_group())

        type_ = self.process(cast.typeclause)
        if type_ is None:
            # The target type cannot appear in CAST(); render the bare
            # expression instead.
            return self.process(cast.clause.self_group())

        return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)

    def render_literal_value(self, value, type_):
        value = super(MySQLCompiler, self).render_literal_value(value, type_)
        if self.dialect._backslash_escapes:
            # Double backslashes so the server's C-style string escaping
            # reads them back as literal backslashes.
            value = value.replace('\\', '\\\\')
        return value

    def get_select_precolumns(self, select):
        """Add special MySQL keywords in place of DISTINCT.

        .. note::

          this usage is deprecated.  :meth:`.Select.prefix_with`
          should be used for special keywords at the start
          of a SELECT.

        """
        if isinstance(select._distinct, basestring):
            return select._distinct.upper() + " "
        elif select._distinct:
            return "DISTINCT "
        else:
            return ""

    def visit_join(self, join, asfrom=False, **kwargs):
        # 'JOIN ... ON ...' for inner joins isn't available until 4.0.
        # Apparently < 3.23.17 requires theta joins for inner joins
        # (but not outer).  Not generating these currently, but
        # support can be added, preferably after dialects are
        # refactored to be version-sensitive.
        return ''.join(
            (self.process(join.left, asfrom=True, **kwargs),
             (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "),
             self.process(join.right, asfrom=True, **kwargs),
             " ON ",
             self.process(join.onclause, **kwargs)))

    def for_update_clause(self, select):
        if select.for_update == 'read':
            # MySQL's shared-lock variant of FOR UPDATE.
            return ' LOCK IN SHARE MODE'
        else:
            return super(MySQLCompiler, self).for_update_clause(select)

    def limit_clause(self, select):
        # MySQL supports:
        #    LIMIT <limit>
        #    LIMIT <offset>, <limit>
        # and in server versions > 3.3:
        #    LIMIT <limit> OFFSET <offset>
        # The latter is more readable for offsets but we're stuck with the
        # former until we can refine dialects by server revision.

        limit, offset = select._limit, select._offset

        if (limit, offset) == (None, None):
            return ''
        elif offset is not None:
            # As suggested by the MySQL docs, need to apply an
            # artificial limit if one wasn't provided
            # http://dev.mysql.com/doc/refman/5.0/en/select.html
            if limit is None:
                # hardwire the upper limit.  Currently
                # needed by OurSQL with Python 3
                # (https://bugs.launchpad.net/oursql/+bug/686232),
                # but also is consistent with the usage of the upper
                # bound as part of MySQL's "syntax" for OFFSET with
                # no LIMIT
                return ' \n LIMIT %s, %s' % (
                    self.process(sql.literal(offset)),
                    "18446744073709551615")
            else:
                return ' \n LIMIT %s, %s' % (
                    self.process(sql.literal(offset)),
                    self.process(sql.literal(limit)))
        else:
            # No offset provided, so just use the limit
            return ' \n LIMIT %s' % (self.process(sql.literal(limit)),)

    def update_limit_clause(self, update_stmt):
        # Support MySQL's UPDATE ... LIMIT n via the mysql_limit= keyword.
        limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None)
        if limit:
            return "LIMIT %s" % limit
        else:
            return None

    def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
        return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
                         for t in [from_table] + list(extra_froms))

    def update_from_clause(self, update_stmt, from_table,
                           extra_froms, from_hints, **kw):
        # MySQL names every table in the UPDATE clause itself; no separate
        # FROM clause is rendered.
        return None
# NOTE: per the MySQL documentation: "InnoDB needs indexes on foreign keys
# and referenced keys [...]. Starting with MySQL 4.1.2, these indexes are
# created automatically. In older versions, the indexes must be created
# explicitly or the creation of foreign key constraints fails."
class MySQLDDLCompiler(compiler.DDLCompiler):
    """DDL compiler with MySQL-specific CREATE/ALTER/DROP behavior."""

    def create_table_constraints(self, table):
        """Get table constraints.

        In addition to the standard constraint output, appends an explicit
        KEY for the AUTO_INCREMENT column on InnoDB tables when that column
        is not the leading column of the primary key, since InnoDB requires
        the auto-increment column to lead some index.
        """
        constraint_string = super(MySQLDDLCompiler, self).create_table_constraints(table)

        engine_key = '%s_engine' % self.dialect.name
        # dict.has_key() is deprecated (and removed in Python 3); use the
        # ``in`` operator instead.
        is_innodb = engine_key in table.kwargs and \
            table.kwargs[engine_key].lower() == 'innodb'

        auto_inc_column = table._autoincrement_column

        if is_innodb and \
                auto_inc_column is not None and \
                auto_inc_column is not list(table.primary_key)[0]:
            if constraint_string:
                constraint_string += ", \n\t"
            constraint_string += "KEY %s (%s)" % (
                self.preparer.quote(
                    "idx_autoinc_%s" % auto_inc_column.name, None
                ),
                self.preparer.format_column(auto_inc_column)
            )

        return constraint_string

    def get_column_specification(self, column, **kw):
        """Builds column DDL."""

        colspec = [self.preparer.format_column(column),
                   self.dialect.type_compiler.process(column.type)
                   ]

        default = self.get_column_default_string(column)
        if default is not None:
            colspec.append('DEFAULT ' + default)

        # TIMESTAMP columns get special nullability rendering: no NOT NULL,
        # and an explicit NULL when nullable with no default.
        is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP)
        if not column.nullable and not is_timestamp:
            colspec.append('NOT NULL')
        elif column.nullable and is_timestamp and default is None:
            colspec.append('NULL')

        if column is column.table._autoincrement_column and column.server_default is None:
            colspec.append('AUTO_INCREMENT')

        return ' '.join(colspec)

    def post_create_table(self, table):
        """Build table-level CREATE options like ENGINE and COLLATE."""

        table_opts = []

        # Collect mysql_* table kwargs, stripped of the dialect prefix and
        # upper-cased.
        opts = dict(
            (
                k[len(self.dialect.name) + 1:].upper(),
                v
            )
            for k, v in table.kwargs.items()
            if k.startswith('%s_' % self.dialect.name)
        )

        # Character-set options must be emitted before COLLATE; the
        # topological sort enforces that ordering.
        for opt in topological.sort([
            ('DEFAULT_CHARSET', 'COLLATE'),
            ('DEFAULT_CHARACTER_SET', 'COLLATE')
        ], opts):
            arg = opts[opt]
            if opt in _options_of_type_string:
                # Quote string-valued options, escaping backslashes and
                # embedded single quotes.
                arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")

            if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
                       'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
                       'DEFAULT_CHARSET',
                       'DEFAULT_COLLATE'):
                opt = opt.replace('_', ' ')

            joiner = '='
            if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
                       'CHARACTER SET', 'COLLATE'):
                joiner = ' '

            table_opts.append(joiner.join((opt, arg)))
        return ' '.join(table_opts)

    def visit_create_index(self, create):
        index = create.element
        preparer = self.preparer
        table = preparer.format_table(index.table)
        columns = [preparer.quote(c.name, c.quote) for c in index.columns]
        name = preparer.quote(
            self._index_identifier(index.name),
            index.quote)

        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        text += "INDEX %s ON %s " % (name, table)

        columns = ', '.join(columns)
        if 'mysql_length' in index.kwargs:
            # Optional index prefix length, e.g. (col(10)).
            length = index.kwargs['mysql_length']
            text += "(%s(%d))" % (columns, length)
        else:
            text += "(%s)" % (columns)

        if 'mysql_using' in index.kwargs:
            # Optional index storage method, e.g. USING BTREE.
            using = index.kwargs['mysql_using']
            text += " USING %s" % (preparer.quote(using, index.quote))

        return text

    def visit_primary_key_constraint(self, constraint):
        text = super(MySQLDDLCompiler, self).\
            visit_primary_key_constraint(constraint)
        if "mysql_using" in constraint.kwargs:
            using = constraint.kwargs['mysql_using']
            text += " USING %s" % (
                self.preparer.quote(using, constraint.quote))
        return text

    def visit_drop_index(self, drop):
        index = drop.element

        return "\nDROP INDEX %s ON %s" % \
            (self.preparer.quote(
                self._index_identifier(index.name), index.quote
            ),
             self.preparer.format_table(index.table))

    def visit_drop_constraint(self, drop):
        # MySQL drops constraints via ALTER TABLE ... DROP <kind> <name>;
        # the keyword depends on the constraint flavor.
        constraint = drop.element
        if isinstance(constraint, sa_schema.ForeignKeyConstraint):
            qual = "FOREIGN KEY "
            const = self.preparer.format_constraint(constraint)
        elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
            qual = "PRIMARY KEY "
            const = ""
        elif isinstance(constraint, sa_schema.UniqueConstraint):
            qual = "INDEX "
            const = self.preparer.format_constraint(constraint)
        else:
            qual = ""
            const = self.preparer.format_constraint(constraint)
        return "ALTER TABLE %s DROP %s%s" % \
            (self.preparer.format_table(constraint.table),
             qual, const)
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
    def _extend_numeric(self, type_, spec):
        "Extend a numeric-type declaration with MySQL specific extensions."
        if not self._mysql_type(type_):
            # Generic (non-MySQL) types carry no unsigned/zerofill flags.
            return spec
        if type_.unsigned:
            spec += ' UNSIGNED'
        if type_.zerofill:
            spec += ' ZEROFILL'
        return spec

    def _extend_string(self, type_, defaults, spec):
        """Extend a string-type declaration with standard SQL CHARACTER SET /
        COLLATE annotations and MySQL specific extensions.

        """
        def attr(name):
            # Read the attribute from the type, falling back to the
            # caller-supplied defaults dict.
            return getattr(type_, name, defaults.get(name))

        if attr('charset'):
            charset = 'CHARACTER SET %s' % attr('charset')
        elif attr('ascii'):
            charset = 'ASCII'
        elif attr('unicode'):
            charset = 'UNICODE'
        else:
            charset = None

        if attr('collation'):
            collation = 'COLLATE %s' % type_.collation
        elif attr('binary'):
            collation = 'BINARY'
        else:
            collation = None

        if attr('national'):
            # NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
            return ' '.join([c for c in ('NATIONAL', spec, collation)
                             if c is not None])
        return ' '.join([c for c in (spec, charset, collation)
                         if c is not None])

    def _mysql_type(self, type_):
        # True for the MySQL-specific type classes defined in this module.
        return isinstance(type_, (_StringType, _NumericType))
    def visit_NUMERIC(self, type_):
        # Render precision/scale only when they were supplied.
        if type_.precision is None:
            return self._extend_numeric(type_, "NUMERIC")
        elif type_.scale is None:
            return self._extend_numeric(type_,
                                        "NUMERIC(%(precision)s)" %
                                        {'precision': type_.precision})
        else:
            return self._extend_numeric(type_,
                                        "NUMERIC(%(precision)s, %(scale)s)" %
                                        {'precision': type_.precision,
                                         'scale': type_.scale})

    def visit_DECIMAL(self, type_):
        # Same precision/scale handling as NUMERIC.
        if type_.precision is None:
            return self._extend_numeric(type_, "DECIMAL")
        elif type_.scale is None:
            return self._extend_numeric(type_,
                                        "DECIMAL(%(precision)s)" %
                                        {'precision': type_.precision})
        else:
            return self._extend_numeric(type_,
                                        "DECIMAL(%(precision)s, %(scale)s)" %
                                        {'precision': type_.precision,
                                         'scale': type_.scale})
    def visit_DOUBLE(self, type_):
        # DOUBLE takes a (precision, scale) pair only when both are given.
        if type_.precision is not None and type_.scale is not None:
            return self._extend_numeric(type_,
                                        "DOUBLE(%(precision)s, %(scale)s)" %
                                        {'precision': type_.precision,
                                         'scale': type_.scale})
        else:
            return self._extend_numeric(type_, 'DOUBLE')

    def visit_REAL(self, type_):
        # REAL takes a (precision, scale) pair only when both are given.
        if type_.precision is not None and type_.scale is not None:
            return self._extend_numeric(type_,
                                        "REAL(%(precision)s, %(scale)s)" %
                                        {'precision': type_.precision,
                                         'scale': type_.scale})
        else:
            return self._extend_numeric(type_, 'REAL')

    def visit_FLOAT(self, type_):
        # Only the MySQL-specific FLOAT carries scale; the generic type has
        # precision alone.
        if self._mysql_type(type_) and \
                type_.scale is not None and \
                type_.precision is not None:
            return self._extend_numeric(type_,
                                        "FLOAT(%s, %s)" % (type_.precision, type_.scale))
        elif type_.precision is not None:
            return self._extend_numeric(type_, "FLOAT(%s)" % (type_.precision,))
        else:
            return self._extend_numeric(type_, "FLOAT")
    # The integer visitors render an optional display width, which only the
    # MySQL-specific integer types carry.
    def visit_INTEGER(self, type_):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(type_,
                                        "INTEGER(%(display_width)s)" %
                                        {'display_width': type_.display_width})
        else:
            return self._extend_numeric(type_, "INTEGER")

    def visit_BIGINT(self, type_):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(type_,
                                        "BIGINT(%(display_width)s)" %
                                        {'display_width': type_.display_width})
        else:
            return self._extend_numeric(type_, "BIGINT")

    def visit_MEDIUMINT(self, type_):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(type_,
                                        "MEDIUMINT(%(display_width)s)" %
                                        {'display_width': type_.display_width})
        else:
            return self._extend_numeric(type_, "MEDIUMINT")

    def visit_TINYINT(self, type_):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(type_, "TINYINT(%s)" % type_.display_width)
        else:
            return self._extend_numeric(type_, "TINYINT")

    def visit_SMALLINT(self, type_):
        if self._mysql_type(type_) and type_.display_width is not None:
            return self._extend_numeric(type_,
                                        "SMALLINT(%(display_width)s)" %
                                        {'display_width': type_.display_width}
                                        )
        else:
            return self._extend_numeric(type_, "SMALLINT")
    def visit_BIT(self, type_):
        # Optional bit width, e.g. BIT(8).
        if type_.length is not None:
            return "BIT(%s)" % type_.length
        else:
            return "BIT"

    def visit_DATETIME(self, type_):
        return "DATETIME"

    def visit_DATE(self, type_):
        return "DATE"

    def visit_TIME(self, type_):
        return "TIME"

    def visit_TIMESTAMP(self, type_):
        return 'TIMESTAMP'

    def visit_YEAR(self, type_):
        # Optional display width, e.g. YEAR(4).
        if type_.display_width is None:
            return "YEAR"
        else:
            return "YEAR(%s)" % type_.display_width
def visit_TEXT(self, type_):
if type_.length:
return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
else:
return self._extend_string(type_, {}, "TEXT")
def visit_TINYTEXT(self, type_):
    """Render TINYTEXT, with any charset/collation attributes."""
    return self._extend_string(type_, {}, "TINYTEXT")
def visit_MEDIUMTEXT(self, type_):
    """Render MEDIUMTEXT, with any charset/collation attributes."""
    return self._extend_string(type_, {}, "MEDIUMTEXT")
def visit_LONGTEXT(self, type_):
    """Render LONGTEXT, with any charset/collation attributes."""
    return self._extend_string(type_, {}, "LONGTEXT")
def visit_VARCHAR(self, type_):
    """Render VARCHAR; MySQL requires an explicit length."""
    if not type_.length:
        raise exc.CompileError(
            "VARCHAR requires a length on dialect %s" %
            self.dialect.name)
    return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
def visit_CHAR(self, type_):
    """Render CHAR, with the length qualifier when one is configured."""
    if type_.length:
        return self._extend_string(type_, {}, "CHAR(%s)" % type_.length)
    return self._extend_string(type_, {}, "CHAR")
def visit_NVARCHAR(self, type_):
    """Render NVARCHAR as NATIONAL VARCHAR; MySQL requires a length."""
    # We'll actually generate the equiv. "NATIONAL VARCHAR" instead
    # of "NVARCHAR".
    if not type_.length:
        raise exc.CompileError(
            "NVARCHAR requires a length on dialect %s" %
            self.dialect.name)
    return self._extend_string(
        type_, {'national': True}, "VARCHAR(%s)" % type_.length)
def visit_NCHAR(self, type_):
    """Render NCHAR as NATIONAL CHAR, with the optional length."""
    national = {'national': True}
    if type_.length:
        return self._extend_string(type_, national, "CHAR(%s)" % type_.length)
    return self._extend_string(type_, national, "CHAR")
def visit_VARBINARY(self, type_):
    """Render VARBINARY; MySQL requires an explicit length.

    Raises CompileError when no length is configured, consistent with
    visit_VARCHAR, instead of the opaque TypeError that
    ``"%d" % None`` would otherwise produce.
    """
    if type_.length is None:
        raise exc.CompileError(
            "VARBINARY requires a length on dialect %s" %
            self.dialect.name)
    return "VARBINARY(%d)" % type_.length
def visit_large_binary(self, type_):
    """Render the generic LargeBinary type as BLOB."""
    return self.visit_BLOB(type_)
def visit_enum(self, type_):
    """Render the generic Enum type: native MySQL ENUM when enabled,
    otherwise fall back to the base compiler's emulation."""
    if not type_.native_enum:
        return super(MySQLTypeCompiler, self).visit_enum(type_)
    else:
        return self.visit_ENUM(type_)
def visit_BLOB(self, type_):
    """Render BLOB, adding the optional length qualifier."""
    if type_.length:
        return "BLOB(%d)" % type_.length
    return "BLOB"
def visit_TINYBLOB(self, type_):
    """Render the TINYBLOB column type."""
    return "TINYBLOB"
def visit_MEDIUMBLOB(self, type_):
    """Render the MEDIUMBLOB column type."""
    return "MEDIUMBLOB"
def visit_LONGBLOB(self, type_):
    """Render the LONGBLOB column type."""
    return "LONGBLOB"
def visit_ENUM(self, type_):
    """Render ENUM with each value quoted and embedded quotes doubled."""
    quoted_enums = ["'%s'" % e.replace("'", "''") for e in type_.enums]
    return self._extend_string(
        type_, {}, "ENUM(%s)" % ",".join(quoted_enums))
def visit_SET(self, type_):
    """Render SET from the pre-quoted values prepared by the SET type."""
    return self._extend_string(type_, {}, "SET(%s)" % ",".join(type_._ddl_values))
def visit_BOOLEAN(self, type_):
    """Render BOOL (MySQL's boolean alias).

    Parameter renamed from ``type`` to ``type_`` for consistency with
    every other visit_* method and to avoid shadowing the builtin.
    """
    return "BOOL"
class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier quoting for MySQL, honoring the ANSI_QUOTES sql mode."""

    reserved_words = RESERVED_WORDS

    def __init__(self, dialect, server_ansiquotes=False, **kw):
        if server_ansiquotes:
            # ANSI_QUOTES mode: identifiers are double-quoted, SQL-92 style.
            quote = '"'
        else:
            # MySQL's traditional backtick quoting.
            quote = "`"

        super(MySQLIdentifierPreparer, self).__init__(
            dialect,
            initial_quote=quote,
            escape_quote=quote)

    def _quote_free_identifiers(self, *ids):
        """Unilaterally identifier-quote any number of strings."""

        return tuple([self.quote_identifier(i)
                      for i in ids if i is not None])
class MySQLDialect(default.DefaultDialect):
    """Details of the MySQL dialect.  Not used directly in application code."""

    name = 'mysql'
    supports_alter = True

    # identifiers are 64, however aliases can be 255...
    max_identifier_length = 255
    max_index_name_length = 64

    supports_native_enum = True

    supports_sane_rowcount = True
    supports_sane_multi_rowcount = False

    default_paramstyle = 'format'
    colspecs = colspecs

    statement_compiler = MySQLCompiler
    ddl_compiler = MySQLDDLCompiler
    type_compiler = MySQLTypeCompiler
    ischema_names = ischema_names
    preparer = MySQLIdentifierPreparer

    # default SQL compilation settings -
    # these are modified upon initialize(),
    # i.e. first connect
    _backslash_escapes = True
    _server_ansiquotes = False
def __init__(self, use_ansiquotes=None, isolation_level=None, **kwargs):
    # `use_ansiquotes` is accepted but unused here; the actual quoting
    # mode is sniffed from the server in initialize() -- presumably the
    # parameter is retained for backwards compatibility.
    default.DefaultDialect.__init__(self, **kwargs)
    self.isolation_level = isolation_level
def on_connect(self):
    """Return a connect-time hook applying the configured isolation level,
    or None when no level was requested."""
    if self.isolation_level is None:
        return None

    def connect(conn):
        self.set_isolation_level(conn, self.isolation_level)
    return connect
# Isolation levels accepted by set_isolation_level; underscores in the
# caller-supplied value are translated to spaces before lookup.
_isolation_lookup = set(['SERIALIZABLE',
            'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])

def set_isolation_level(self, connection, level):
    """Set the session transaction isolation level on a raw DBAPI connection.

    :param level: one of ``_isolation_lookup``, spaces or underscores.
    :raises ArgumentError: for an unrecognized level name.
    """
    level = level.replace('_', ' ')
    if level not in self._isolation_lookup:
        raise exc.ArgumentError(
            "Invalid value '%s' for isolation_level. "
            "Valid isolation levels for %s are %s" %
            (level, self.name, ", ".join(self._isolation_lookup))
            )
    cursor = connection.cursor()
    cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
    # end any transaction in progress so the setting applies cleanly
    cursor.execute("COMMIT")
    cursor.close()
def get_isolation_level(self, connection):
    """Return the session isolation level, normalized to e.g. 'READ COMMITTED'."""
    cursor = connection.cursor()
    cursor.execute('SELECT @@tx_isolation')
    val = cursor.fetchone()[0]
    cursor.close()
    # the server reports e.g. 'READ-COMMITTED'; normalize the separator
    return val.upper().replace("-", " ")
def do_commit(self, connection):
    """Execute a COMMIT."""

    # COMMIT/ROLLBACK were introduced in 3.23.15.
    # Yes, we have at least one user who has to talk to these old versions!
    #
    # Ignore commit/rollback if support isn't present, otherwise even basic
    # operations via autocommit fail.
    try:
        connection.commit()
    except:
        if self.server_version_info < (3, 23, 15):
            args = sys.exc_info()[1].args
            if args and args[0] == 1064:
                # 1064 = syntax error: the old server doesn't know
                # COMMIT at all; swallow it.  Anything else re-raises.
                return
        raise
def do_rollback(self, connection):
    """Execute a ROLLBACK."""

    try:
        connection.rollback()
    except:
        if self.server_version_info < (3, 23, 15):
            # pre-3.23.15 servers lack ROLLBACK; swallow the resulting
            # 1064 syntax error, re-raise everything else.
            args = sys.exc_info()[1].args
            if args and args[0] == 1064:
                return
        raise
def do_begin_twophase(self, connection, xid):
    """Begin an XA transaction with the given xid."""
    connection.execute(sql.text("XA BEGIN :xid"), xid=xid)
def do_prepare_twophase(self, connection, xid):
    """END then PREPARE the XA transaction identified by xid."""
    connection.execute(sql.text("XA END :xid"), xid=xid)
    connection.execute(sql.text("XA PREPARE :xid"), xid=xid)
def do_rollback_twophase(self, connection, xid, is_prepared=True,
                         recover=False):
    """Roll back an XA transaction, ENDing it first if not yet prepared."""
    if not is_prepared:
        connection.execute(sql.text("XA END :xid"), xid=xid)
    connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid)
def do_commit_twophase(self, connection, xid, is_prepared=True,
                       recover=False):
    """Commit an XA transaction, preparing it first if needed."""
    if not is_prepared:
        self.do_prepare_twophase(connection, xid)
    connection.execute(sql.text("XA COMMIT :xid"), xid=xid)
def do_recover_twophase(self, connection):
    """Return xids of prepared XA transactions awaiting recovery."""
    resultset = connection.execute("XA RECOVER")
    # XA RECOVER rows carry the xid in 'data', truncated to gtrid_length
    return [row['data'][0:row['gtrid_length']] for row in resultset]
def is_disconnect(self, e, connection, cursor):
    """Return True when *e* indicates the connection has been dropped."""
    if isinstance(e, self.dbapi.OperationalError):
        # server-gone-away / lost-connection error codes
        return self._extract_error_code(e) in \
            (2006, 2013, 2014, 2045, 2055)
    if isinstance(e, self.dbapi.InterfaceError):
        # if underlying connection is closed,
        # this is the error you get
        return "(0, '')" in str(e)
    return False
def _compat_fetchall(self, rp, charset=None):
    """Proxy result rows to smooth over MySQL-Python driver inconsistencies."""
    # wrap every row so values are charset-decoded on access
    return [_DecodingRowProxy(row, charset) for row in rp.fetchall()]
def _compat_fetchone(self, rp, charset=None):
    """Proxy a result row to smooth over MySQL-Python driver inconsistencies."""
    return _DecodingRowProxy(rp.fetchone(), charset)
def _compat_first(self, rp, charset=None):
    """Proxy a result row to smooth over MySQL-Python driver inconsistencies."""
    return _DecodingRowProxy(rp.first(), charset)
def _extract_error_code(self, exception):
    """Return the MySQL error code from a DBAPI exception; driver-specific,
    implemented by each DBAPI subclass."""
    raise NotImplementedError()
def _get_default_schema_name(self, connection):
    # the schema currently selected by USE / the connect-time database
    return connection.execute('SELECT DATABASE()').scalar()
def has_table(self, connection, table_name, schema=None):
    """Return True if the named table exists, probed via DESCRIBE.

    Server error 1146 ("table doesn't exist") maps to False; any other
    database error propagates.
    """
    # SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
    # on macosx (and maybe win?) with multibyte table names.
    #
    # TODO: if this is not a problem on win, make the strategy swappable
    # based on platform.  DESCRIBE is slower.
    # [ticket:726]
    # full_name = self.identifier_preparer.format_table(table,
    #                                                   use_schema=True)

    full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
        schema, table_name))

    st = "DESCRIBE %s" % full_name
    rs = None
    try:
        try:
            rs = connection.execute(st)
            have = rs.rowcount > 0
            rs.close()
            return have
        except exc.DBAPIError, e:
            if self._extract_error_code(e.orig) == 1146:
                return False
            raise
    finally:
        if rs:
            rs.close()
def initialize(self, connection):
    """First-connect setup: sniff server charset, identifier casing,
    collations and the ANSI_QUOTES sql mode."""
    default.DefaultDialect.initialize(self, connection)
    self._connection_charset = self._detect_charset(connection)
    self._server_casing = self._detect_casing(connection)
    self._server_collations = self._detect_collations(connection)
    self._detect_ansiquotes(connection)
    if self._server_ansiquotes:
        # if ansiquotes == True, build a new IdentifierPreparer
        # with the new setting
        self.identifier_preparer = self.preparer(self,
                server_ansiquotes=self._server_ansiquotes)
@property
def _supports_cast(self):
    # CAST() support; assume present when the server version is unknown
    return self.server_version_info is None or \
                self.server_version_info >= (4, 0, 2)
@reflection.cache
def get_schema_names(self, connection, **kw):
    """Return all schema (database) names visible on the server."""
    rp = connection.execute("SHOW schemas")
    return [r[0] for r in rp]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
    """Return a Unicode SHOW TABLES from a given schema."""
    if schema is not None:
        current_schema = schema
    else:
        current_schema = self.default_schema_name

    charset = self._connection_charset
    if self.server_version_info < (5, 0, 2):
        # pre-5.0.2 servers don't distinguish views from tables
        rp = connection.execute("SHOW TABLES FROM %s" %
            self.identifier_preparer.quote_identifier(current_schema))
        return [row[0] for row in self._compat_fetchall(rp, charset=charset)]
    else:
        # 5.0.2+ adds a second column; keep only BASE TABLE rows
        rp = connection.execute("SHOW FULL TABLES FROM %s" %
                self.identifier_preparer.quote_identifier(current_schema))
        return [row[0] for row in self._compat_fetchall(rp, charset=charset)\
                if row[1] == 'BASE TABLE']
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
    """Return view names for *schema*.

    MySQL only distinguishes views from tables as of 5.0.2, so older
    servers are rejected outright.  (A second, unreachable version
    check that followed the raise has been removed.)
    """
    if self.server_version_info < (5, 0, 2):
        raise NotImplementedError
    if schema is None:
        schema = self.default_schema_name
    charset = self._connection_charset
    rp = connection.execute("SHOW FULL TABLES FROM %s" %
            self.identifier_preparer.quote_identifier(schema))
    return [row[0] for row in self._compat_fetchall(rp, charset=charset)\
            if row[1] in ('VIEW', 'SYSTEM VIEW')]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
    """Return the reflected table-level options (engine, charset, etc.)."""
    parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw)
    return parsed_state.table_options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
    """Return reflected column dictionaries for *table_name*."""
    parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw)
    return parsed_state.columns
@reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
    """Return the primary key column names for *table_name*, or []."""
    parsed_state = self._parsed_state_or_create(
        connection, table_name, schema, **kw)
    for key in parsed_state.keys:
        if key['type'] == 'PRIMARY':
            # There can be only one.
            return [col[0] for col in key['columns']]
    return []
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
    """Return reflected foreign key dictionaries for *table_name*."""

    parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw)
    default_schema = None

    fkeys = []

    for spec in parsed_state.constraints:
        # only FOREIGN KEYs
        ref_name = spec['table'][-1]
        # a two-part name carries the remote schema; else assume ours
        ref_schema = len(spec['table']) > 1 and spec['table'][-2] or schema

        if not ref_schema:
            if default_schema is None:
                default_schema = \
                    connection.dialect.default_schema_name
            if schema == default_schema:
                ref_schema = schema

        loc_names = spec['local']
        ref_names = spec['foreign']

        # pass through only the options the constraint actually had
        con_kw = {}
        for opt in ('name', 'onupdate', 'ondelete'):
            if spec.get(opt, False):
                con_kw[opt] = spec[opt]

        fkey_d = {
            'name' : spec['name'],
            'constrained_columns' : loc_names,
            'referred_schema' : ref_schema,
            'referred_table' : ref_name,
            'referred_columns' : ref_names,
            'options' : con_kw
        }
        fkeys.append(fkey_d)
    return fkeys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
    """Return reflected index dictionaries, excluding the primary key."""

    parsed_state = self._parsed_state_or_create(
        connection, table_name, schema, **kw)

    indexes = []
    for spec in parsed_state.keys:
        flavor = spec['type']
        if flavor == 'PRIMARY':
            continue
        unique = flavor == 'UNIQUE'
        if not unique and flavor not in (None, 'FULLTEXT', 'SPATIAL'):
            self.logger.info(
                "Converting unknown KEY type %s to a plain KEY" % flavor)
        indexes.append({
            'name': spec['name'],
            'column_names': [col[0] for col in spec['columns']],
            'unique': unique,
            'type': flavor,
        })
    return indexes
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
    """Return the CREATE statement text for *view_name*."""
    charset = self._connection_charset
    full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
        schema, view_name))
    # the full SHOW CREATE TABLE text is the view's definition
    # (local renamed from `sql`, which shadowed the module-level import)
    view_sql = self._show_create_table(connection, None, charset,
                                       full_name=full_name)
    return view_sql
def _parsed_state_or_create(self, connection, table_name, schema=None, **kw):
    """Return the (possibly cached) ReflectedState for a table."""
    return self._setup_parser(
        connection,
        table_name,
        schema,
        info_cache=kw.get('info_cache', None)
    )
@util.memoized_property
def _tabledef_parser(self):
    """return the MySQLTableDefinitionParser, generate if needed.

    The deferred creation ensures that the dialect has
    retrieved server version information first.

    """
    if (self.server_version_info < (4, 1) and self._server_ansiquotes):
        # ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
        preparer = self.preparer(self, server_ansiquotes=False)
    else:
        preparer = self.identifier_preparer
    return MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
    """Fetch the table's DDL and parse it into a ReflectedState."""
    charset = self._connection_charset
    parser = self._tabledef_parser
    full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
        schema, table_name))
    sql = self._show_create_table(connection, None, charset,
                                  full_name=full_name)
    if sql.startswith('CREATE ALGORITHM'):
        # Adapt views to something table-like.
        columns = self._describe_table(connection, None, charset,
                                       full_name=full_name)
        sql = parser._describe_to_create(table_name, columns)
    return parser.parse(sql, charset)
def _detect_charset(self, connection):
    """Sniff the connection charset; implemented per-DBAPI in subclasses."""
    raise NotImplementedError()
def _detect_casing(self, connection):
    """Sniff out identifier case sensitivity.

    Cached per-connection. This value can not change without a server
    restart.

    Returns the server's lower_case_table_names setting as an int.
    """
    # http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html

    charset = self._connection_charset
    row = self._compat_first(connection.execute(
        "SHOW VARIABLES LIKE 'lower_case_table_names'"),
        charset=charset)
    if not row:
        # variable missing: treat as case sensitive
        cs = 0
    else:
        # 4.0.15 returns OFF or ON according to [ticket:489]
        # 3.23 doesn't, 4.0.27 doesn't..
        if row[1] == 'OFF':
            cs = 0
        elif row[1] == 'ON':
            cs = 1
        else:
            cs = int(row[1])
    return cs
def _detect_collations(self, connection):
    """Pull the active COLLATIONS list from the server.

    Cached per-connection.  Empty on pre-4.1 servers, which lack
    SHOW COLLATION.
    """
    collations = {}
    if self.server_version_info >= (4, 1, 0):
        charset = self._connection_charset
        rs = connection.execute('SHOW COLLATION')
        for row in self._compat_fetchall(rs, charset):
            collations[row[0]] = row[1]
    return collations
def _detect_ansiquotes(self, connection):
    """Detect and adjust for the ANSI_QUOTES sql mode."""
    row = self._compat_first(
        connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
        charset=self._connection_charset)

    if not row:
        mode = ''
    else:
        mode = row[1] or ''
        # 4.0
        if mode.isdigit():
            # numeric sql_mode bitmask; bit 0x4 is ANSI_QUOTES
            mode_no = int(mode)
            mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''

    self._server_ansiquotes = 'ANSI_QUOTES' in mode

    # as of MySQL 5.0.1
    self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode
def _show_create_table(self, connection, table, charset=None,
full_name=None):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
rp = connection.execute(st)
except exc.DBAPIError, e:
if self._extract_error_code(e.orig) == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
row = self._compat_first(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
return sql
def _describe_table(self, connection, table, charset=None,
                    full_name=None):
    """Run DESCRIBE for a ``Table`` and return processed rows.

    :raises NoSuchTableError: when the server reports error 1146.
    """
    if full_name is None:
        full_name = self.identifier_preparer.format_table(table)
    st = "DESCRIBE %s" % full_name

    rp, rows = None, None
    try:
        try:
            rp = connection.execute(st)
        except exc.DBAPIError, e:
            if self._extract_error_code(e.orig) == 1146:
                raise exc.NoSuchTableError(full_name)
            else:
                raise
        rows = self._compat_fetchall(rp, charset=charset)
    finally:
        if rp:
            rp.close()
    return rows
class ReflectedState(object):
    """Stores raw information about a SHOW CREATE TABLE statement."""

    def __init__(self):
        # table name plus the raw parse products accumulated by the parser
        self.table_name = None
        self.columns = []
        self.keys = []
        self.constraints = []
        self.table_options = {}
class MySQLTableDefinitionParser(object):
    """Parses the results of a SHOW CREATE TABLE statement."""

    def __init__(self, dialect, preparer):
        # :param dialect: owning MySQLDialect
        # :param preparer: identifier preparer matching the server's quoting mode
        self.dialect = dialect
        self.preparer = preparer
        self._prep_regexes()
def parse(self, show_create, charset):
    """Parse SHOW CREATE TABLE text, line by line, into a ReflectedState."""
    state = ReflectedState()
    state.charset = charset
    for line in re.split(r'\r?\n', show_create):
        # a column line starts with an indented, quoted identifier
        if line.startswith(' ' + self.preparer.initial_quote):
            self._parse_column(line, state)
        # a regular table options line
        elif line.startswith(') '):
            self._parse_table_options(line, state)
        # an ANSI-mode table options line
        elif line == ')':
            pass
        elif line.startswith('CREATE '):
            self._parse_table_name(line, state)
        # Not present in real reflection, but may be if loading from a file.
        elif not line:
            pass
        else:
            type_, spec = self._parse_constraints(line)
            if type_ is None:
                util.warn("Unknown schema content: %r" % line)
            elif type_ == 'key':
                state.keys.append(spec)
            elif type_ == 'constraint':
                state.constraints.append(spec)
            else:
                # e.g. partition clauses: recognized but ignored
                pass
    return state
def _parse_constraints(self, line):
    """Parse a KEY or CONSTRAINT line.

    :param line: A line of SHOW CREATE TABLE output
    :returns: a ('key' | 'constraint' | 'partition' | None, spec) 2-tuple.
    """
    # KEY
    m = self._re_key.match(line)
    if m:
        spec = m.groupdict()
        # convert columns into name, length pairs
        spec['columns'] = self._parse_keyexprs(spec['columns'])
        return 'key', spec

    # CONSTRAINT
    m = self._re_constraint.match(line)
    if m:
        spec = m.groupdict()
        spec['table'] = \
            self.preparer.unformat_identifiers(spec['table'])
        spec['local'] = [c[0]
                         for c in self._parse_keyexprs(spec['local'])]
        spec['foreign'] = [c[0]
                           for c in self._parse_keyexprs(spec['foreign'])]
        return 'constraint', spec

    # PARTITION and SUBPARTITION
    m = self._re_partition.match(line)
    if m:
        # Punt!
        return 'partition', line

    # No match.
    return (None, line)
def _parse_table_name(self, line, state):
    """Extract the table name.

    :param line: The first line of SHOW CREATE TABLE
    """
    regex, cleanup = self._pr_name
    m = regex.match(line)
    if m:
        # cleanup un-escapes any doubled quote characters in the name
        state.table_name = cleanup(m.group('name'))
def _parse_table_options(self, line, state):
    """Build a dictionary of all reflected table-level options.

    :param line: The final line of SHOW CREATE TABLE output.
    """
    options = {}

    if not line or line == ')':
        pass
    else:
        rest_of_line = line[:]
        # each recognized option is extracted then stripped from the line
        for regex, cleanup in self._pr_options:
            m = regex.search(rest_of_line)
            if not m:
                continue
            directive, value = m.group('directive'), m.group('val')
            if cleanup:
                value = cleanup(value)
            options[directive.lower()] = value
            rest_of_line = regex.sub('', rest_of_line)

    # drop options that are per-instance state, not schema definition
    for nope in ('auto_increment', 'data directory', 'index directory'):
        options.pop(nope, None)

    for opt, val in options.items():
        state.table_options['%s_%s' % (self.dialect.name, opt)] = val
def _parse_column(self, line, state):
    """Extract column details.

    Falls back to a 'minimal support' variant if full parse fails.

    :param line: Any column-bearing line from SHOW CREATE TABLE
    """
    spec = None
    m = self._re_column.match(line)
    if m:
        spec = m.groupdict()
        spec['full'] = True
    else:
        m = self._re_column_loose.match(line)
        if m:
            spec = m.groupdict()
            spec['full'] = False
    if not spec:
        util.warn("Unknown column definition %r" % line)
        return
    if not spec['full']:
        util.warn("Incomplete reflection of column definition %r" % line)

    name, type_, args, notnull = \
          spec['name'], spec['coltype'], spec['arg'], spec['notnull']

    try:
        col_type = self.dialect.ischema_names[type_]
    except KeyError:
        util.warn("Did not recognize type '%s' of column '%s'" %
                  (type_, name))
        col_type = sqltypes.NullType

    # Column type positional arguments eg. varchar(32)
    if args is None or args == '':
        type_args = []
    elif args[0] == "'" and args[-1] == "'":
        # quoted string arguments, e.g. enum values
        type_args = self._re_csv_str.findall(args)
    else:
        type_args = [int(v) for v in self._re_csv_int.findall(args)]

    # Column type keyword options
    type_kw = {}
    for kw in ('unsigned', 'zerofill'):
        if spec.get(kw, False):
            type_kw[kw] = True
    for kw in ('charset', 'collate'):
        if spec.get(kw, False):
            type_kw[kw] = spec[kw]

    if type_ == 'enum':
        type_args = ENUM._strip_enums(type_args)

    type_instance = col_type(*type_args, **type_kw)

    col_args, col_kw = [], {}

    # NOT NULL
    col_kw['nullable'] = True
    if spec.get('notnull', False):
        col_kw['nullable'] = False

    # AUTO_INCREMENT
    if spec.get('autoincr', False):
        col_kw['autoincrement'] = True
    elif issubclass(col_type, sqltypes.Integer):
        col_kw['autoincrement'] = False

    # DEFAULT
    default = spec.get('default', None)

    if default == 'NULL':
        # eliminates the need to deal with this later.
        default = None

    col_d = dict(name=name, type=type_instance, default=default)
    col_d.update(col_kw)
    state.columns.append(col_d)
def _describe_to_create(self, table_name, columns):
    """Re-format DESCRIBE output as a SHOW CREATE TABLE string.

    DESCRIBE is a much simpler reflection and is sufficient for
    reflecting views for runtime use.  This method formats DDL
    for columns only- keys are omitted.

    :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
      SHOW FULL COLUMNS FROM rows must be rearranged for use with
      this function.
    """

    buffer = []
    for row in columns:
        # (name, type, nullable, default, extra); row[3] (key) is skipped
        (name, col_type, nullable, default, extra) = \
              [row[i] for i in (0, 1, 2, 4, 5)]

        line = [' ']
        line.append(self.preparer.quote_identifier(name))
        line.append(col_type)
        if not nullable:
            line.append('NOT NULL')
        if default:
            if 'auto_increment' in default:
                pass
            elif (col_type.startswith('timestamp') and
                  default.startswith('C')):
                # e.g. CURRENT_TIMESTAMP: emit the expression unquoted
                line.append('DEFAULT')
                line.append(default)
            elif default == 'NULL':
                line.append('DEFAULT')
                line.append(default)
            else:
                line.append('DEFAULT')
                line.append("'%s'" % default.replace("'", "''"))
        if extra:
            line.append(extra)

        buffer.append(' '.join(line))

    return ''.join([('CREATE TABLE %s (\n' %
                     self.preparer.quote_identifier(table_name)),
                    ',\n'.join(buffer),
                    '\n) '])
def _parse_keyexprs(self, identifiers):
    """Unpack '"col"(2),"col" ASC'-ish strings into components."""

    # yields (name, optional-length) tuples
    return self._re_keyexprs.findall(identifiers)
def _prep_regexes(self):
    """Pre-compile regular expressions.

    Fixes two reflection bugs in the original patterns:

    - the COMMENT clause used ``(P<comment>...)`` -- the missing ``?``
      made it a literal group instead of a named group, so column
      comments could never be captured;
    - the FOREIGN KEY ON DELETE/UPDATE alternation misspelled
      ``CASCADE`` ('CASCASDE') and ``NO ACTION`` ('NOACTION'), so
      those referential actions never reflected.
    """
    self._re_columns = []
    self._pr_options = []

    _final = self.preparer.final_quote

    # iq/fq/esc_fq = initial quote, final quote, escaped final quote
    quotes = dict(zip(('iq', 'fq', 'esc_fq'),
                  [re.escape(s) for s in
                   (self.preparer.initial_quote,
                    _final,
                    self.preparer._escape_identifier(_final))]))

    self._pr_name = _pr_compile(
        r'^CREATE (?:\w+ +)?TABLE +'
        r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
        self.preparer._unescape_identifier)

    # `col`,`col2`(32),`col3`(15) DESC
    #
    # Note: ASC and DESC aren't reflected, so we'll punt...
    self._re_keyexprs = _re_compile(
        r'(?:'
        r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
        r'(?:\((\d+)\))?(?=\,|$))+' % quotes)

    # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
    self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')

    # 123 or 123,456
    self._re_csv_int = _re_compile(r'\d+')

    # `colname` <type> [type opts]
    # (NOT NULL | NULL)
    # DEFAULT ('value' | CURRENT_TIMESTAMP...)
    # COMMENT 'comment'
    # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
    # STORAGE (DISK|MEMORY)
    self._re_column = _re_compile(
        r' '
        r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
        r'(?P<coltype>\w+)'
        r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
        r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
        r'(?: +(?P<unsigned>UNSIGNED))?'
        r'(?: +(?P<zerofill>ZEROFILL))?'
        r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
        r'(?: +COLLATE +(?P<collate>[\w_]+))?'
        r'(?: +(?P<notnull>NOT NULL))?'
        r'(?: +DEFAULT +(?P<default>'
        r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
        r'(?: +ON UPDATE \w+)?)'
        r'))?'
        r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
        r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
        r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
        r'(?: +STORAGE +(?P<storage>\w+))?'
        r'(?: +(?P<extra>.*))?'
        r',?$'
        % quotes
        )

    # Fallback, try to parse as little as possible
    self._re_column_loose = _re_compile(
        r' '
        r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
        r'(?P<coltype>\w+)'
        r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
        r'.*?(?P<notnull>NOT NULL)?'
        % quotes
        )

    # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
    # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
    # KEY_BLOCK_SIZE size | WITH PARSER name
    self._re_key = _re_compile(
        r' '
        r'(?:(?P<type>\S+) )?KEY'
        r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
        r'(?: +USING +(?P<using_pre>\S+))?'
        r' +\((?P<columns>.+?)\)'
        r'(?: +USING +(?P<using_post>\S+))?'
        r'(?: +KEY_BLOCK_SIZE +(?P<keyblock>\S+))?'
        r'(?: +WITH PARSER +(?P<parser>\S+))?'
        r',?$'
        % quotes
        )

    # CONSTRAINT `name` FOREIGN KEY (`local_col`)
    # REFERENCES `remote` (`remote_col`)
    # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
    # ON DELETE CASCADE ON UPDATE RESTRICT
    #
    # unique constraints come back as KEYs
    kw = quotes.copy()
    kw['on'] = 'RESTRICT|CASCADE|SET NULL|NO ACTION'
    self._re_constraint = _re_compile(
        r' '
        r'CONSTRAINT +'
        r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
        r'FOREIGN KEY +'
        r'\((?P<local>[^\)]+?)\) REFERENCES +'
        r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
        r'\((?P<foreign>[^\)]+?)\)'
        r'(?: +(?P<match>MATCH \w+))?'
        r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
        r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
        % kw
        )

    # PARTITION
    #
    # punt!
    self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')

    # Table-level options (COLLATE, ENGINE, etc.)
    # Do the string options first, since they have quoted strings we need to get rid of.
    for option in _options_of_type_string:
        self._add_option_string(option)

    for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
                   'AVG_ROW_LENGTH', 'CHARACTER SET',
                   'DEFAULT CHARSET', 'CHECKSUM',
                   'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
                   'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
                   'KEY_BLOCK_SIZE'):
        self._add_option_word(option)

    self._add_option_regex('UNION', r'\([^\)]+\)')
    self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
    self._add_option_regex('RAID_TYPE',
      r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
# Optional "=" (or bare whitespace) between a table-option directive and
# its value.
_optional_equals = r'(?:\s*(?:=\s*)|\s+)'

def _add_option_string(self, directive):
    """Register a table option whose value is a quoted string."""
    regex = (r'(?P<directive>%s)%s'
             r"'(?P<val>(?:[^']|'')*?)'(?!')" %
             (re.escape(directive), self._optional_equals))
    # cleanup un-doubles quotes and unescapes backslashes in the value
    self._pr_options.append(
        _pr_compile(regex, lambda v: v.replace("\\\\","\\").replace("''", "'")))
def _add_option_word(self, directive):
    """Register a table option whose value is a single bare word."""
    regex = (r'(?P<directive>%s)%s'
             r'(?P<val>\w+)' %
             (re.escape(directive), self._optional_equals))
    self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
    """Register a table option whose value matches a custom regex."""
    regex = (r'(?P<directive>%s)%s'
             r'(?P<val>%s)' %
             (re.escape(directive), self._optional_equals, regex))
    self._pr_options.append(_pr_compile(regex))
# Table options whose values are quoted strings rather than bare words.
_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
                           'PASSWORD', 'CONNECTION')
# Install SQLAlchemy's class-level loggers (provides the .logger attribute).
log.class_logger(MySQLTableDefinitionParser)
log.class_logger(MySQLDialect)
class _DecodingRowProxy(object):
    """Return unicode-decoded values based on type inspection.

    Smooth over data type issues (esp. with alpha driver versions) and
    normalize strings as Unicode regardless of user-configured driver
    encoding settings.

    NOTE: the ``# Py2K`` / ``# Py3K`` comment markers below appear to be
    consumed by SQLAlchemy's build-time 2to3 preprocessing -- do not edit
    or remove them.
    """

    # Some MySQL-python versions can return some columns as
    # sets.Set(['value']) (seriously) but thankfully that doesn't
    # seem to come up in DDL queries.

    def __init__(self, rowproxy, charset):
        # wrapped row and the charset used to decode byte strings
        self.rowproxy = rowproxy
        self.charset = charset

    def __getitem__(self, index):
        item = self.rowproxy[index]
        if isinstance(item, _array):
            item = item.tostring()
        # Py2K
        if self.charset and isinstance(item, str):
        # end Py2K
        # Py3K
        #if self.charset and isinstance(item, bytes):
            return item.decode(self.charset)
        else:
            return item

    def __getattr__(self, attr):
        item = getattr(self.rowproxy, attr)
        if isinstance(item, _array):
            item = item.tostring()
        # Py2K
        if self.charset and isinstance(item, str):
        # end Py2K
        # Py3K
        #if self.charset and isinstance(item, bytes):
            return item.decode(self.charset)
        else:
            return item
def _pr_compile(regex, cleanup=None):
    """Prepare a 2-tuple of compiled regex and callable."""

    # cleanup, when given, post-processes each matched value
    return (_re_compile(regex), cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
| 0.001687 |
import os
from .base import NullBrowser, ExecutorBrowser, require_arg
from .base import get_timeout_multiplier # noqa: F401
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor # noqa: F401
# Directory containing this module.
here = os.path.join(os.path.split(__file__)[0])

# Entry points wptrunner uses to drive this product; values are names
# resolved within this module.
__wptrunner__ = {
    "product": "servo",
    "check_args": "check_args",
    "browser": "ServoBrowser",
    "executor": {
        "testharness": "ServoTestharnessExecutor",
        "reftest": "ServoRefTestExecutor",
        "wdspec": "ServoWdspecExecutor",
    },
    "browser_kwargs": "browser_kwargs",
    "executor_kwargs": "executor_kwargs",
    "env_extras": "env_extras",
    "env_options": "env_options",
    "timeout_multiplier": "get_timeout_multiplier",
    "update_properties": "update_properties",
}
def check_args(**kwargs):
    """Validate command-line arguments; servo requires --binary."""
    require_arg(kwargs, "binary")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
    """Build the keyword arguments used to construct ServoBrowser."""
    return {
        "binary": kwargs["binary"],
        "debug_info": kwargs["debug_info"],
        "binary_args": kwargs["binary_args"],
        "user_stylesheets": kwargs.get("user_stylesheets"),
        "ca_certificate_path": config.ssl_config["ca_cert_path"],
    }
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                    **kwargs):
    """Extend the base executor kwargs with servo-specific settings."""
    rv = base_executor_kwargs(test_type, server_config,
                              cache_manager, run_info_data, **kwargs)
    rv["pause_after_test"] = kwargs["pause_after_test"]
    if test_type == "wdspec":
        # wdspec executors expect a capabilities dict, even an empty one
        rv["capabilities"] = {}
    return rv
def env_extras(**kwargs):
    """Return extra environment setup objects; servo needs none."""
    return []
def env_options():
    """Server-environment settings used when running tests under servo."""
    return {
        "server_host": "127.0.0.1",
        "bind_address": False,
        "testharnessreport": "testharnessreport-servo.js",
        "supports_debugger": True,
    }
def update_properties():
    """Run-info properties consulted when updating expectation metadata."""
    properties = ["debug", "os", "version", "processor", "bits"]
    return properties, None
class ServoBrowser(NullBrowser):
    """Null browser driver for servo: the executors launch the binary
    themselves, so this only carries configuration."""

    def __init__(self, logger, binary, debug_info=None, binary_args=None,
                 user_stylesheets=None, ca_certificate_path=None):
        super(ServoBrowser, self).__init__(logger)
        self.binary = binary
        self.debug_info = debug_info
        self.binary_args = binary_args or []
        self.user_stylesheets = user_stylesheets or []
        self.ca_certificate_path = ca_certificate_path

    def executor_browser(self):
        """Return the class and kwargs used on the executor side."""
        options = {
            "binary": self.binary,
            "debug_info": self.debug_info,
            "binary_args": self.binary_args,
            "user_stylesheets": self.user_stylesheets,
            "ca_certificate_path": self.ca_certificate_path,
        }
        return ExecutorBrowser, options
| 0 |
"""Support for Zigbee sensors."""
from binascii import hexlify
import logging
import voluptuous as vol
from homeassistant.components import zigbee
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
CONF_TYPE = 'type'
CONF_MAX_VOLTS = 'max_volts'
DEFAULT_VOLTS = 1.2
TYPES = ['analog', 'temperature']
# Extend the shared zigbee platform schema with sensor-specific options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_TYPE): vol.In(TYPES),
    vol.Optional(CONF_MAX_VOLTS, default=DEFAULT_VOLTS): vol.Coerce(float),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ZigBee platform.

    Uses the 'type' config value to work out which type of ZigBee sensor we're
    dealing with and instantiates the relevant classes to handle it.
    """
    typ = config.get(CONF_TYPE)

    try:
        sensor_class, config_class = TYPE_CLASSES[typ]
    except KeyError:
        # Schema validation should prevent this; log and bail out if not.
        _LOGGER.exception("Unknown ZigBee sensor type: %s", typ)
        return

    # True -> request an immediate first state update.
    add_entities([sensor_class(hass, config_class(config))], True)
class ZigBeeTemperatureSensor(Entity):
    """Representation of XBee Pro temperature sensor."""

    def __init__(self, hass, config):
        """Initialize the sensor."""
        self._config = config
        # last temperature reading, in Celsius; None until first update
        self._temp = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._config.name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._temp

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement the value is expressed in."""
        return TEMP_CELSIUS

    def update(self):
        """Get the latest data."""
        try:
            self._temp = zigbee.DEVICE.get_temperature(self._config.address)
        except zigbee.ZIGBEE_TX_FAILURE:
            # transient radio failure: keep the previous reading
            _LOGGER.warning(
                "Transmission failure when attempting to get sample from "
                "ZigBee device at address: %s", hexlify(self._config.address))
        except zigbee.ZIGBEE_EXCEPTION as exc:
            _LOGGER.exception(
                "Unable to get sample from ZigBee device: %s", exc)
# This must be below the classes to which it refers.
# Maps each supported 'type' value to (sensor entity class, config class).
TYPE_CLASSES = {
    "temperature": (ZigBeeTemperatureSensor, zigbee.ZigBeeConfig),
    "analog": (zigbee.ZigBeeAnalogIn, zigbee.ZigBeeAnalogInConfig)
}
| 0 |
#!/usr/bin/env python
import argparse
import datetime
import socket
import sys
import time
import aircraft_map
import pygame.midi
UPDATE_INTERVAL = 30.0 # seconds
# Aircraft outside these altitude (feet) / distance (meters) bounds are
# skipped when producing sound.
MIN_ALTITUDE = 3000
MAX_ALTITUDE = 40000
MAX_DISTANCE = 70000
MIDI_VOLUME_MAX = 100
# Pitches (MIDI note numbers) to choose from, lowest to highest; an
# aircraft's altitude is mapped onto an index into this tuple.
MIDI_NOTE_PALETTE = (
    24,
    36,
    48, 50, 53, 55, 58,
    60, 62, 65, 67, 70,
    72, 74, 77, 79, 82,
    84, 86, 89, 91, 94,
    106, 108, 111, 113, 116,
    118, 120, 123
)
# Number of entries in the palette (valid indexes are 0..MAX_MIDI_NOTE-1).
MAX_MIDI_NOTE = len(MIDI_NOTE_PALETTE)
def map_int(x, in_min, in_max, out_min, out_max):
    """Linearly rescale x from [in_min, in_max] to [out_min, out_max].

    Despite the name, true division means the result may be a float.
    """
    out_span = out_max - out_min
    in_span = in_max - in_min
    return (x - in_min) * out_span / in_span + out_min
def set_pan(player, pan, channel):
    """
    Set the panning on a MIDI channel. 0 = hard left, 127 = hard right.
    """
    # 0xb0 is the Control Change status nibble; controller 0x0a is pan.
    player.write_short(0xb0 | channel, 0x0a, pan)
def map_bearing_to_pan(bearing):
    """
    Convert a plane's bearing to a MIDI pan controller value.
    """
    # Rotate so that due east/west land on the pan extremes.
    rotated = (int(bearing) + 270) % 360
    if rotated < 180:
        return map_int(rotated, 0, 180, 127, 0)
    return map_int(rotated, 180, 360, 0, 127)
class ADSBTheremin(object):
    """Turn live ADS-B aircraft reports into MIDI notes.

    Connects to a dump1090 server, keeps a map of nearby aircraft, and
    periodically emits one MIDI note per aircraft: altitude selects the
    pitch, distance the volume, and bearing the stereo pan position.
    """

    def __init__(self, args):
        """Store parsed command-line options and create the aircraft map."""
        self._host = args.host
        self._port = args.port
        self._mylat = args.lat
        self._mylon = args.lon
        self._midi_channels = range(args.midi_channels)  # 0-based
        self._num_midi_channels = len(self._midi_channels)
        self._polyphony = args.polyphony
        self._player = None  # Created in init().
        self._map = aircraft_map.AircraftMap(args.lat, args.lon)

    def init(self):
        """Locate the IAC MIDI output device and assign instruments.

        Exits the process if no IAC output device can be found.
        """
        if not pygame.midi.get_init():
            pygame.midi.init()
        device_id = 0
        while True:
            devinf = pygame.midi.get_device_info(device_id)
            if devinf is not None:
                interf, name, is_input, is_output, opened = devinf
                if "IAC" in name and is_output == 1:
                    print("Using device id %d" % device_id)
                    break
            else:
                # Ran past the last device without finding an IAC output.
                sys.stderr.write("Can't find IAC output\n")
                sys.exit(1)
            device_id += 1
        self._player = pygame.midi.Output(device_id)
        # Set instrument <n> on MIDI channel <n>.
        for i, channel in enumerate(self._midi_channels):
            self._player.set_instrument(i, channel)

    def all_notes_off(self):
        """Silence every note on every MIDI channel we use."""
        for midi_channel in self._midi_channels:
            for note in range(127):
                self._player.note_off(note, channel=midi_channel)

    def make_sound(self):
        """Emit one MIDI note per nearby aircraft, closest first."""
        print("%s: %d aircraft" %
              (datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S"),
               self._map.count()))
        self.all_notes_off()
        aircraft = self._map.closest(self._polyphony)
        midi_channel = 0
        for a in aircraft:
            distance = a.distance_to(self._mylat, self._mylon)
            if distance > MAX_DISTANCE or a.altitude > MAX_ALTITUDE:
                continue
            if a.altitude < MIN_ALTITUDE:
                continue
            # Clamp the palette index: altitude == MAX_ALTITUDE would
            # otherwise produce MAX_MIDI_NOTE, one past the end of
            # MIDI_NOTE_PALETTE (IndexError).
            note_index = min(
                int(float(a.altitude) / MAX_ALTITUDE * MAX_MIDI_NOTE),
                MAX_MIDI_NOTE - 1)
            note = MIDI_NOTE_PALETTE[note_index]
            # Louder for closer aircraft.
            volume = int((MAX_DISTANCE - distance) /
                         MAX_DISTANCE * MIDI_VOLUME_MAX)
            deg = a.bearing_from(self._mylat, self._mylon)
            # map_bearing_to_pan returns a float; MIDI wants an int value.
            pan_value = int(map_bearing_to_pan(deg))
            set_pan(self._player, pan_value, midi_channel)
            self._player.note_on(note, volume, midi_channel)
            print("Id %s alt %s MIDI note %d MIDI vol %d MIDI chan %d "
                  "dist %d m" %
                  (a.id, a.altitude, note, volume, midi_channel + 1,
                   distance))
            midi_channel = (midi_channel + 1) % self._num_midi_channels
        print("")

    def play(self):
        """Connect to dump1090 and run the update/sound loop forever."""
        print("Connect to %s:%d" % (self._host, self._port))
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self._host, self._port))
        fp = sock.makefile()
        try:
            # Prime the aircraft list - just get updates for a little while
            print("Priming aircraft map...")
            prime_start = time.time()
            while time.time() - prime_start <= 3.0:
                self._map.update(fp.readline())
            print("Done.")
            last_midi_update = 0.0
            while True:
                self._map.update(fp.readline())
                if time.time() - last_midi_update > UPDATE_INTERVAL:
                    self.make_sound()
                    last_midi_update = time.time()
        finally:
            sock.close()
            self.all_notes_off()
            pygame.midi.quit()
def main():
    """Parse command-line options and run the theremin until interrupted."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", required=True,
                        help="IP address or hostname of host running dump1090")
    parser.add_argument("-p", "--port", type=int, required=True,
                        help="Port for dump1090 server")
    parser.add_argument("--lat", type=float, required=True,
                        help="Your latitude")
    parser.add_argument("--lon", type=float, required=True,
                        help="Your longitude")
    parser.add_argument("--midi-channels", type=int, default=1,
                        help="Number of MIDI channels to use")
    parser.add_argument("--polyphony", type=int, default=8,
                        help="Number of simultaneous notes")
    args = parser.parse_args()
    theremin = ADSBTheremin(args)
    theremin.init()
    theremin.play()


if __name__ == "__main__":
    main()
| 0.002288 |
"""Contains the data assimilation methods included with DAPPER.
See the README section on
[DA Methods](https://github.com/nansencenter/DAPPER#DA-Methods)
for an overview of the methods included with DAPPER.
## Defining your own method
Follow the example of one of the methods within one of the
sub-directories/packages.
The simplest example is perhaps
`dapper.da_methods.ensemble.EnKF`.
## General advice for programming/debugging scientific experiments
- Start with something simple.
This helps make sure the basics of the experiment are reasonable.
For example, start with
- a pre-existing example,
- something you are able to reproduce,
- a small/simple model.
- Set the observation error to be small.
- Observe everything.
- Don't include model error and/or noise to begin with.
- Additionally, test a simple/baseline method to begin with.
When including an ensemble method, start with using a large ensemble,
and introduce localisation later.
- Take incremental steps towards your ultimate experiment setup.
Validate each incremental setup with prints/plots.
If results change, make sure you understand why.
- Use short experiment duration.
You probably don't need statistical significance while debugging.
"""
import dataclasses
import functools
import time
from dataclasses import dataclass
import dapper.stats
def da_method(*default_dataclasses):
    """Turn a dataclass-style class into a DA method for DAPPER (`xp`).

    This decorator applies to classes that define DA methods.
    An instance of the resulting class is referred to (in DAPPER)
    as an `xp` (short for experiment).

    The decorated classes are defined like a `dataclass`,
    but are decorated by `@da_method()` instead of `@dataclass`.

    .. note::
        The classes must define a method called `assimilate`.
        This method gets slightly enhanced by this wrapper which provides:

        - Initialisation of the `Stats` object, accessible by `self.stats`.
        - `fail_gently` functionality.
        - Duration timing
        - Progressbar naming magic.

    Example:
    >>> @da_method()
    ... class Sleeper():
    ...     "Do nothing."
    ...     seconds : int = 10
    ...     success : bool = True
    ...     def assimilate(self, *args, **kwargs):
    ...         for k in range(self.seconds):
    ...             time.sleep(1)
    ...         if not self.success:
    ...             raise RuntimeError("Sleep over. Failing as intended.")

    Internally, `da_method` is just like `dataclass`,
    except that adds an outer layer
    (hence the empty parantheses in the above)
    which enables defining default parameters which can be inherited,
    similar to subclassing.

    Example:
    >>> class ens_defaults:
    ...     infl : float = 1.0
    ...     rot : bool = False

    >>> @da_method(ens_defaults)
    ... class EnKF:
    ...     N : int
    ...     upd_a : str = "Sqrt"
    ...
    ...     def assimilate(self, HMM, xx, yy):
    ...         ...
    """
    def dataclass_with_defaults(cls):
        """Like `dataclass`, but add some DAPPER-specific things.

        This adds `__init__`, `__repr__`, `__eq__`, ...,
        but also includes inherited defaults,
        ref https://stackoverflow.com/a/58130805,
        and enhances the `assimilate` method.
        """
        def set_field(name, type_, val):
            """Set the inherited (i.e. default, i.e. has value) field."""
            # Ensure annotations
            cls.__annotations__ = getattr(cls, '__annotations__', {})
            # Set annotation
            cls.__annotations__[name] = type_
            # Set value
            setattr(cls, name, val)

        # APPend default fields without overwriting.
        # NB: Don't implement (by PREpending?) non-default args -- too messy!
        for default_params in default_dataclasses:
            # NB: Calling dataclass twice always makes repr=True
            # NB: the dataclasses.Field object itself is set as the value,
            # which `dataclass` below recognises as a field specification.
            for field in dataclasses.fields(dataclass(default_params)):
                if field.name not in cls.__annotations__:
                    set_field(field.name, field.type, field)

        # Create new class (NB: old/new classes have same id)
        cls = dataclass(cls)

        # The new assimilate method
        def assimilate(self, HMM, xx, yy, desc=None, **stat_kwargs):
            # Progressbar name
            pb_name_hook = self.da_method if desc is None else desc # noqa
            # Init stats
            self.stats = dapper.stats.Stats(self, HMM, xx, yy, **stat_kwargs)
            # Assimilate
            time_start = time.time()
            _assimilate(self, HMM, xx, yy)
            # Record wall-clock duration as a stat.
            dapper.stats.register_stat(
                self.stats, "duration", time.time()-time_start)

        # Overwrite the assimilate method with the new one
        try:
            _assimilate = cls.assimilate
        except AttributeError as error:
            raise AttributeError(
                "Classes decorated by da_method()"
                " must define a method called 'assimilate'.") from error
        cls.assimilate = functools.wraps(_assimilate)(assimilate)

        # Make self.__class__.__name__ an attrib.
        # Used by xpList.split_attrs().
        cls.da_method = cls.__name__

        return cls
    return dataclass_with_defaults
from .baseline import Climatology, OptInterp, Var3D
from .ensemble import LETKF, SL_EAKF, EnKF, EnKF_N, EnKS, EnRTS
from .extended import ExtKF, ExtRTS
from .other import LNETF, RHF
from .particle import OptPF, PartFilt, PFa, PFxN, PFxN_EnKF
from .variational import Var4D, iEnKS
| 0.001246 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# A script to accumulate values from the 'dmprof cat' command into CSV or else.
#
# Usage:
# ./accumulate.py -f <format> -t <template-name> < input.json > output
#
# <format> is one of "csv", "json", and "tree". If "csv" or "json" is given,
# accumulate.py dumps a similar file to "dmprof csv|json". If "tree" is given,
# accumulate.py dumps a human-readable breakdown tree.
#
# <template-name> is a label in templates.json.
import datetime
import json
import logging
import optparse
import sys
from lib.ordered_dict import OrderedDict
LOGGER = logging.getLogger('dmprof-accumulate')
def visit_in_template(template, snapshot, depth):
    """Visits all categories via a given template.

    This function is not used. It's a sample function to traverse a template.
    """
    # A template is a triple: (world name, breakdown name, sub-rule dict).
    world = template[0]
    breakdown = template[1]
    rules = template[2]
    # NOTE: Python 2 only ('iteritems' and statement-form 'print').
    for rule, _ in snapshot[world]['breakdown'][breakdown].iteritems():
        print (' ' * depth) + rule
        if rule in rules:
            # Recurse into the sub-template for this rule.
            visit_in_template(rules[rule], snapshot, depth + 1)
def accumulate(template, snapshot, units_dict, target_units):
    """Accumulates units in a JSON |snapshot| with applying a given |template|.

    Args:
        template: A template tree included in a dmprof cat JSON file.
        snapshot: A snapshot in a dmprof cat JSON file.
        units_dict: A dict of units in worlds.
        target_units: A list of unit ids which are a target of this accumulation.

    Returns:
        A tuple (category tree, total bytes accounted, unmatched unit ids).
    """
    # A template is a triple: (world name, breakdown name, sub-rule dict).
    world = template[0]
    breakdown = template[1]
    rules = template[2]
    remainder_units = target_units.copy()
    category_tree = OrderedDict()
    total = 0
    # NOTE: Python 2 only ('iteritems' and 'print >>' below).
    for rule, match in snapshot[world]['breakdown'][breakdown].iteritems():
        if 'hidden' in match and match['hidden']:
            continue
        matched_units = set(match['units']).intersection(target_units)
        subtotal = 0
        for unit_id in matched_units:
            subtotal += units_dict[world][unit_id]
        total += subtotal
        remainder_units = remainder_units.difference(matched_units)
        if rule not in rules:
            # A category matched with |rule| is a leaf of the breakdown tree.
            # It is NOT broken down more.
            category_tree[rule] = subtotal
            continue
        # A category matched with |rule| is broken down more.
        subtemplate = rules[rule]
        subworld = subtemplate[0]
        subbreakdown = subtemplate[1]
        if subworld == world:
            # Break down in the same world: consider units.
            category_tree[rule], accounted_total, subremainder_units = accumulate(
                subtemplate, snapshot, units_dict, matched_units)
            subremainder_total = 0
            if subremainder_units:
                for unit_id in subremainder_units:
                    subremainder_total += units_dict[world][unit_id]
                # Units matched by no sub-rule are filed under the None key.
                category_tree[rule][None] = subremainder_total
            if subtotal != accounted_total + subremainder_total:
                print >> sys.stderr, (
                    'WARNING: Sum of %s:%s is different from %s by %d bytes.' % (
                        subworld, subbreakdown, rule,
                        subtotal - (accounted_total + subremainder_total)))
        else:
            # Break down in a different world: consider only the total size.
            category_tree[rule], accounted_total, _ = accumulate(
                subtemplate, snapshot, units_dict, set(units_dict[subworld].keys()))
            if subtotal >= accounted_total:
                category_tree[rule][None] = subtotal - accounted_total
            else:
                print >> sys.stderr, (
                    'WARNING: Sum of %s:%s is larger than %s by %d bytes.' % (
                        subworld, subbreakdown, rule, accounted_total - subtotal))
                print >> sys.stderr, (
                    'WARNING: Assuming remainder of %s is 0.' % rule)
                category_tree[rule][None] = 0
    return category_tree, total, remainder_units
def flatten(category_tree, header=''):
    """Flattens a category tree into a flat list of (label, value) pairs.

    Nested labels are joined with '>'; falsy keys become 'remaining'.
    """
    result = []
    for rule, sub in category_tree.items():
        if not rule:
            rule = 'remaining'
        flattened_rule = header + '>' + rule if header else rule
        if isinstance(sub, (dict, OrderedDict)):
            result.extend(flatten(sub, flattened_rule))
        else:
            result.append((flattened_rule, sub))
    return result
def print_category_tree(category_tree, output, depth=0):
    """Prints a category tree in a human-readable format."""
    # NOTE: Python 2 'print >> file' syntax throughout.
    for label in category_tree:
        # Trailing comma keeps the indent and label on the same line.
        print >> output, (' ' * depth),
        if (isinstance(category_tree[label], dict) or
            isinstance(category_tree[label], OrderedDict)):
            print >> output, '%s:' % label
            print_category_tree(category_tree[label], output, depth + 1)
        else:
            print >> output, '%s: %d' % (label, category_tree[label])
def flatten_all_category_trees(category_trees):
    """Flatten every snapshot's category tree.

    Returns a pair: the set of all labels seen across snapshots, and a
    list of per-snapshot OrderedDicts mapping label -> subtotal.
    """
    all_labels = set()
    table = []
    for tree in category_trees:
        row = OrderedDict()
        for label, subtotal in flatten(tree):
            all_labels.add(label)
            row[label] = subtotal
        table.append(row)
    return all_labels, table
def output_csv(output, category_trees, data, first_time, output_exponent):
    """Writes one CSV row per snapshot: elapsed seconds, then each category.

    NOTE: Python 2 'print >> file' syntax.
    """
    flattened_labels, flattened_table = flatten_all_category_trees(category_trees)
    sorted_flattened_labels = sorted(flattened_labels)
    # Header row.
    print >> output, ','.join(['second'] + sorted_flattened_labels)
    for index, row in enumerate(flattened_table):
        values = [str(data['snapshots'][index]['time'] - first_time)]
        for label in sorted_flattened_labels:
            if label in row:
                # Scale raw bytes to KB/MB according to the -e option.
                divisor = 1
                if output_exponent.upper() == 'K':
                    divisor = 1024.0
                elif output_exponent.upper() == 'M':
                    divisor = 1024.0 * 1024.0
                values.append(str(row[label] / divisor))
            else:
                values.append('0')
        print >> output, ','.join(values)
def output_json(output, category_trees, data, first_time, template_label):
    """Dump all snapshots as a dmprof-style JSON_DEEP_2 document."""
    flattened_labels, flattened_table = flatten_all_category_trees(category_trees)
    json_snapshots = []
    for index, row in enumerate(flattened_table):
        snapshot_time = data['snapshots'][index]['time']
        entry = row.copy()
        entry['second'] = snapshot_time - first_time
        entry['dump_time'] = datetime.datetime.fromtimestamp(
            snapshot_time).strftime('%Y-%m-%d %H:%M:%S')
        json_snapshots.append(entry)
    json_root = {
        'version': 'JSON_DEEP_2',
        'policies': {
            template_label: {
                'legends': sorted(flattened_labels),
                'snapshots': json_snapshots
            }
        }
    }
    json.dump(json_root, output, indent=2, sort_keys=True)
def output_tree(output, category_trees):
    """Prints each snapshot's category tree in indented, human-readable form.

    NOTE: Python 2 'print >> file' syntax.
    """
    for index, category_tree in enumerate(category_trees):
        print >> output, '< Snapshot #%d >' % index
        print_category_tree(category_tree, output, 1)
        print >> output, ''
def do_main(cat_input, output, template_label, output_format, output_exponent):
    """Does the main work: accumulate for every snapshot and print a result."""
    if output_format not in ('csv', 'json', 'tree'):
        raise NotImplementedError('The output format \"%s\" is not implemented.' %
                                  output_format)
    if output_exponent.upper() not in ('B', 'K', 'M'):
        raise NotImplementedError('The exponent \"%s\" is not implemented.' %
                                  output_exponent)
    data = json.loads(cat_input.read(), object_pairs_hook=OrderedDict)
    templates = data['templates']
    if not template_label:
        template_label = data['default_template']
    if template_label not in templates:
        LOGGER.error('A template \'%s\' is not found.' % template_label)
        return
    template = templates[template_label]
    category_trees = []
    first_time = None
    for snapshot in data['snapshots']:
        if not first_time:
            first_time = snapshot['time']
        # Convert {world: {unit_id(str): [size, ...]}} into
        # {world: {unit_id(int): size}} for accumulation.
        units = {}
        for world_name, world in snapshot['worlds'].items():
            units[world_name] = dict(
                (int(unit_id), sizes[0])
                for unit_id, sizes in world['units'].items())
        category_tree, _, _ = accumulate(
            template, snapshot['worlds'], units, set(units[template[0]].keys()))
        category_trees.append(category_tree)
    if output_format == 'csv':
        output_csv(output, category_trees, data, first_time, output_exponent)
    elif output_format == 'json':
        output_json(output, category_trees, data, first_time, template_label)
    elif output_format == 'tree':
        output_tree(output, category_trees)
def main():
    """Configure logging and options parsing, then delegate to do_main."""
    LOGGER.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter('%(message)s'))
    LOGGER.addHandler(handler)

    parser = optparse.OptionParser()
    parser.add_option('-t', '--template', dest='template',
                      metavar='TEMPLATE',
                      help='Apply TEMPLATE to list up.')
    parser.add_option('-f', '--format', dest='format', default='csv',
                      help='Specify the output format: csv, json or tree.')
    parser.add_option('-e', '--exponent', dest='exponent', default='M',
                      help='Specify B (bytes), K (kilobytes) or M (megabytes).')
    options, _ = parser.parse_args(sys.argv)

    do_main(sys.stdin, sys.stdout,
            options.template, options.format, options.exponent)


if __name__ == '__main__':
    sys.exit(main())
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manipulators that can edit SON objects as they enter and exit a database.
New manipulators should be defined as subclasses of SONManipulator and can be
installed on a database by calling
`pymongo.database.Database.add_son_manipulator`."""
from bson.dbref import DBRef
from bson.objectid import ObjectId
from bson.son import SON
class SONManipulator(object):
    """A base son manipulator.

    This manipulator just saves and restores objects without changing them.
    """

    def will_copy(self):
        """Will this SON manipulator make a copy of the incoming document?

        Derived classes that do need to make a copy should override this
        method, returning True instead of False. All non-copying manipulators
        will be applied first (so that the user's document will be updated
        appropriately), followed by copying manipulators.
        """
        return False

    def transform_incoming(self, son, collection):
        """Manipulate an incoming SON object.

        :Parameters:
          - `son`: the SON object to be inserted into the database
          - `collection`: the collection the object is being inserted into
        """
        return SON(son) if self.will_copy() else son

    def transform_outgoing(self, son, collection):
        """Manipulate an outgoing SON object.

        :Parameters:
          - `son`: the SON object being retrieved from the database
          - `collection`: the collection this object was stored in
        """
        return SON(son) if self.will_copy() else son
class ObjectIdInjector(SONManipulator):
    """A son manipulator that adds the _id field if it is missing.
    """

    def transform_incoming(self, son, collection):
        """Add an _id field if it is missing.
        """
        # Only generate a new ObjectId when the document lacks one.
        if "_id" not in son:
            son["_id"] = ObjectId()
        return son
# This is now handled during BSON encoding (for performance reasons),
# but I'm keeping this here as a reference for those implementing new
# SONManipulators.
class ObjectIdShuffler(SONManipulator):
    """A son manipulator that moves _id to the first position.
    """

    def will_copy(self):
        """We need to copy to be sure that we are dealing with SON, not a dict.
        """
        return True

    def transform_incoming(self, son, collection):
        """Move _id to the front if it's there.
        """
        if "_id" not in son:
            return son
        # Start a fresh SON with _id, then merge in the remaining keys.
        reordered = SON({"_id": son["_id"]})
        reordered.update(son)
        return reordered
class NamespaceInjector(SONManipulator):
    """A son manipulator that adds the _ns field.
    """

    def transform_incoming(self, son, collection):
        """Add the _ns field to the incoming object
        """
        # Stamp the document with the name of its destination collection.
        son["_ns"] = collection.name
        return son
class AutoReference(SONManipulator):
    """Transparently reference and de-reference already saved embedded objects.

    This manipulator should probably only be used when the NamespaceInjector is
    also being used, otherwise it doesn't make too much sense - documents can
    only be auto-referenced if they have an *_ns* field.

    NOTE: this will behave poorly if you have a circular reference.

    TODO: this only works for documents that are in the same database. To fix
    this we'll need to add a DatabaseInjector that adds *_db* and then make
    use of the optional *database* support for DBRefs.
    """

    def __init__(self, db):
        self.__database = db

    def will_copy(self):
        """We need to copy so the user's document doesn't get transformed refs.
        """
        return True

    def transform_incoming(self, son, collection):
        """Replace embedded documents with DBRefs.
        """
        def visit(value):
            if isinstance(value, dict):
                if "_id" in value and "_ns" in value:
                    # Already-saved embedded document: store a reference.
                    return DBRef(value["_ns"], visit(value["_id"]))
                return visit_doc(SON(value))
            if isinstance(value, list):
                return [visit(item) for item in value]
            return value

        def visit_doc(doc):
            for (key, value) in doc.items():
                doc[key] = visit(value)
            return doc

        return visit_doc(SON(son))

    def transform_outgoing(self, son, collection):
        """Replace DBRefs with embedded documents.
        """
        def resolve(value):
            if isinstance(value, DBRef):
                # Fetch the referenced document from the database.
                return self.__database.dereference(value)
            if isinstance(value, list):
                return [resolve(item) for item in value]
            if isinstance(value, dict):
                return resolve_doc(SON(value))
            return value

        def resolve_doc(doc):
            for (key, value) in doc.items():
                doc[key] = resolve(value)
            return doc

        return resolve_doc(SON(son))
# TODO make a generic translator for custom types. Take encode, decode,
# should_encode and should_decode functions and just encode and decode where
# necessary. See examples/custom_type.py for where this would be useful.
# Alternatively it could take a should_encode, to_binary, from_binary and
# binary subtype.
| 0.000335 |
#!/usr/bin/env python
import sys, re, codecs
from plasTeX import Base
# Pygments is optional: without it, listings are stored but not highlighted.
# Catch only ImportError -- a bare except would hide unrelated failures
# raised while importing pygments.
try:
    import pygments
except ImportError:
    pygments = None
class listingsname(Base.Command):
    # Caption prefix used for listings (e.g. rendered as "Listing 1.2").
    unicode = 'Listing'
# Options given to \usepackage[...]{listings}; populated by ProcessOptions.
PackageOptions = {}
def ProcessOptions(options, document):
    """Handle the package load: create the listings counter, save options."""
    document.context.newcounter('listings',
                                resetby='chapter',
                                format='${thechapter}.${listings}')
    PackageOptions.update(options)
class lstset(Base.Command):
    """Handler for \\lstset{...}: records a document-wide default language."""
    args = 'arguments:dict'

    def invoke(self, tex):
        Base.Command.invoke(self, tex)
        settings = self.attributes['arguments']
        if 'language' in settings:
            self.ownerDocument.context.current_language = settings['language']
class lstlisting(Base.verbatim):
    """The lstlisting environment: a numbered, formatted code listing."""
    args = '[ arguments:dict ]'
    counter = 'listings'

    def invoke(self, tex):
        if self.macroMode == Base.Environment.MODE_END:
            return
        tokens = Base.verbatim.invoke(self, tex)[1:]
        lines = ''.join(tokens).replace('\r','').split('\n')
        _format(self, lines)
class lstinline(Base.verb):
    """Inline listing command (\\lstinline): formats its verbatim content."""
    args = '[ arguments:dict ]'

    def invoke(self, tex):
        content = ''.join(Base.verb.invoke(self, tex)[2:-1])
        _format(self, content)
class lstinputlisting(Base.Command):
    """\\lstinputlisting: read and format a listing from an external file."""
    args = '[ arguments:dict ] file:str'
    counter = 'listings'

    def invoke(self, tex):
        Base.Command.invoke(self, tex)
        if 'file' not in self.attributes or not self.attributes['file']:
            raise ValueError('Malformed \\lstinputlisting macro.')
        encoding = self.config['files']['input-encoding']
        _format(self, codecs.open(self.attributes['file'], 'r', encoding,
                                  'replace'))
def _format(self, file):
    """Store |file|'s text on |self| and, if possible, highlight it.

    Sets self.plain_listing (raw text limited to the requested firstline/
    lastline range) and, when Pygments is available, self.xhtml_listing.
    NOTE: Python 2 only ('except Exception, msg' and sys.maxint below).
    """
    if self.attributes['arguments'] is None:
        self.attributes['arguments'] = {}
    linenos = False
    if 'numbers' in self.attributes['arguments'] or 'numbers' in PackageOptions:
        linenos = 'inline'
    # If this listing includes a label, inform plasTeX.
    if 'label' in self.attributes['arguments']:
        if hasattr(self.attributes['arguments']['label'], 'textContent'):
            self.ownerDocument.context.label(
                self.attributes['arguments']['label'].textContent)
        else:
            self.ownerDocument.context.label(
                self.attributes['arguments']['label'])
    # Check the textual LaTeX arguments and convert them to Python
    # attributes.
    if 'firstline' in self.attributes['arguments']:
        first_line_number = int(self.attributes['arguments']['firstline'])
    else:
        first_line_number = 0
    if 'lastline' in self.attributes['arguments']:
        last_line_number = int(self.attributes['arguments']['lastline'])
    else:
        last_line_number = sys.maxint
    # Read the file, all the while respecting the "firstline" and
    # "lastline" arguments given in the document.
    self.plain_listing = ''
    for current_line_number, line in enumerate(file):
        current_line_number += 1
        if (current_line_number >= first_line_number) and \
           (current_line_number <= last_line_number):
            # Remove single-line "listings" comments. Only
            # comments started by "/*@" and ended by "@*/" are
            # supported.
            line = re.sub('/\*@[^@]*@\*/', '', line)
            # Add the just-read line to the listing.
            self.plain_listing += '\n' + line
    # Create a syntax highlighted XHTML version of the file using Pygments
    if pygments is not None:
        from pygments import lexers, formatters
        try:
            # Fall back to plain text when the language name is unknown.
            lexer = lexers.get_lexer_by_name(self.ownerDocument.context.current_language.lower())
        except Exception, msg:
            lexer = lexers.TextLexer()
        self.xhtml_listing = pygments.highlight(self.plain_listing, lexer, formatters.HtmlFormatter(linenos=linenos))
| 0.007838 |
# $Id: setup-vc.py 4121 2012-05-14 10:42:56Z bennylp $
#
# pjsua Setup script for Visual Studio
#
# Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from distutils.core import setup, Extension
import os
import sys
# Find version: parse ../../../version.mak to assemble the PJSIP version.
# NOTE: Python 2 script (statement-form 'print' throughout).
pj_version=""
pj_version_major=""
pj_version_minor=""
pj_version_rev=""
pj_version_suffix=""
f = open('../../../version.mak', 'r')
for line in f:
    if line.find("export PJ_VERSION_MAJOR") != -1:
        tokens=line.split("=")
        if len(tokens)>1:
            pj_version_major= tokens[1].strip()
    elif line.find("export PJ_VERSION_MINOR") != -1:
        tokens=line.split("=")
        if len(tokens)>1:
            pj_version_minor= line.split("=")[1].strip()
    elif line.find("export PJ_VERSION_REV") != -1:
        tokens=line.split("=")
        if len(tokens)>1:
            pj_version_rev= line.split("=")[1].strip()
    elif line.find("export PJ_VERSION_SUFFIX") != -1:
        tokens=line.split("=")
        if len(tokens)>1:
            pj_version_suffix= line.split("=")[1].strip()
f.close()
if not pj_version_major:
    print 'Unable to get PJ_VERSION_MAJOR'
    sys.exit(1)
# Full version string is MAJOR.MINOR[.REV][-SUFFIX].
pj_version = pj_version_major + "." + pj_version_minor
if pj_version_rev:
    pj_version += "." + pj_version_rev
if pj_version_suffix:
    pj_version += "-" + pj_version_suffix
#print 'PJ_VERSION = "'+ pj_version + '"'
# Check that extension has been built
if not os.access('../../lib/_pjsua.pyd', os.R_OK):
    print 'Error: file "../../lib/_pjsua.pyd" does not exist!'
    print ''
    print 'Please build the extension with Visual Studio first'
    print 'For more info, see http://trac.pjsip.org/repos/wiki/Python_SIP_Tutorial'
    sys.exit(1)
# Package the prebuilt _pjsua.pyd alongside the pure-Python pjsua module.
setup(name="pjsua",
      version=pj_version,
      description='SIP User Agent Library based on PJSIP',
      url='http://trac.pjsip.org/repos/wiki/Python_SIP_Tutorial',
      data_files=[('lib/site-packages', ['../../lib/_pjsua.pyd'])],
      py_modules=["pjsua"]
      )
| 0.018985 |
"""Bounce last published links so they can be rescanned. Drops metadata."""
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django.conf import settings
from gruntle.memebot.models import Link
# Lower bound on how many links may be bounced in one run.
MIN_LINKS = 25
# Timestamp format used when echoing each bounced link.
DATEFMT = '%Y-%m-%d %H:%M:%S'
def get_max_links():
    """Return the largest max_links across all feeds, at least MIN_LINKS."""
    # Imported here to avoid importing the feeds machinery at module load.
    from gruntle.memebot.feeds import get_feeds
    candidates = [MIN_LINKS]
    for _, feed in get_feeds():
        candidates.append(getattr(feed, 'max_links', 0))
    return max(candidates)
class Command(NoArgsCommand):
    # NOTE: Python 2 management command ('print' statement syntax below).
    help = __doc__
    option_list = (make_option('-c', dest='count', default=get_max_links(), type='int',
                               help="number of links to bounce [%default]"),
                   make_option('-r', dest='reset', default=False, action='store_true',
                               help="also reset published date/id (normally retained)"),
                   make_option('-n', dest='dry_run', default=False, action='store_true',
                               help="don't write to db, only show matching links"),
                   ) + NoArgsCommand.option_list

    def handle_noargs(self, count=None, reset=False, dry_run=False, **kwargs):
        """Reset the most recently published links back to the 'new' state."""
        if count is None:
            count = get_max_links()
        if count <= 0:
            raise CommandError('must specify at least one for bouncing')
        links = Link.objects.all()
        # Links already awaiting a scan count against the requested total,
        # so don't bounce more than necessary.
        pending_count = links.filter(state='new').count()
        if pending_count >= count:
            raise CommandError('there are already %d links pending scan' % pending_count)
        elif pending_count > 0:
            count -= pending_count
            print >> sys.stderr, '%d links already pending, reducing count to %d bounces' % (pending_count, count)
        pub_links = links.filter(state='published').order_by('-published')[:count]
        nlinks = pub_links.count()
        for i, link in enumerate(pub_links):
            if dry_run:
                verb = 'Would reset'
            else:
                verb = 'Reset'
                # Clear all scan results/metadata so the link is rescanned.
                link.state = 'new'
                link.error_count = 0
                link.resolved_url = None
                link.content_type = None
                link.content = None
                link.title = None
                link.scanner = None
                link.attr_storage = None
                if reset:
                    # Also forget that it was ever published.
                    link.published = None
                    link.publish_id = None
                link.save()
            print '[%d/%d] %s: %s <%s> %s' % (
                    i + 1,
                    nlinks,
                    verb,
                    link.created.strftime(DATEFMT),
                    link.user.username,
                    link.url,
                    )
| 0.003704 |
#!/usr/bin/env python3
import argparse
import fcntl
import json
import os
import re
import struct
import subprocess
import sys
import xml.etree.ElementTree
# Input (selector) field names for each register-query kind.
_KEYS = {
    "cpuid": ["eax_in", "ecx_in"],
    "msr": ["index"],
}
# Output register field names for each register-query kind.
_REGS = {
    "cpuid": ["eax", "ebx", "ecx", "edx"],
    "msr": ["eax", "edx"],
}
def gather_name(args):
    """Return the CPU model name.

    Prefers the explicit --name argument; otherwise scans /proc/cpuinfo
    for the first "model name" line. Exits with an error message if
    neither source yields a name.
    """
    if args.name:
        return args.name

    with open("/proc/cpuinfo", "rt") as cpuinfo:
        for line in cpuinfo:
            if line.startswith("model name"):
                return line.split(":", 2)[1].strip()

    exit("Error: '/proc/cpuinfo' does not contain a model name.\n"
         "Use '--model' to set a model name.")
def gather_cpuid_leaves_cpuid(output):
    """Parse `cpuid -1r` output, yielding one register dict per leaf."""
    pattern = re.compile(
        "^\\s*"
        "(0x[0-9a-f]+)\\s*"
        "(0x[0-9a-f]+):\\s*"
        "eax=(0x[0-9a-f]+)\\s*"
        "ebx=(0x[0-9a-f]+)\\s*"
        "ecx=(0x[0-9a-f]+)\\s*"
        "edx=(0x[0-9a-f]+)\\s*$")
    fields = ("eax_in", "ecx_in", "eax", "ebx", "ecx", "edx")
    for row in output.split("\n"):
        hit = pattern.match(row)
        if hit:
            # Parse each captured hex string; non-matching lines are noise.
            yield {name: int(value, 0)
                   for name, value in zip(fields, hit.groups())}
def gather_cpuid_leaves_kcpuid(output):
    """Parse `kcpuid -r` output into a list of register dicts.

    Handles both plain leaf lines and the "subleafs:" branch format,
    where the announced eax_in applies to the indented subleaf lines
    that follow it.
    """
    leave_pattern = re.compile(
        "^(0x[0-9a-f]+): "
        "EAX=(0x[0-9a-f]+), "
        "EBX=(0x[0-9a-f]+), "
        "ECX=(0x[0-9a-f]+), "
        "EDX=(0x[0-9a-f]+)$")
    branch_pattern_head = re.compile(
        "^(0x[0-9a-f]+): "
        "subleafs:$")
    branch_pattern_body = re.compile(
        "^\\s*([0-9]+): "
        "EAX=(0x[0-9a-f]+), "
        "EBX=(0x[0-9a-f]+), "
        "ECX=(0x[0-9a-f]+), "
        "EDX=(0x[0-9a-f]+)$")

    def as_regs(eax_in, ecx_in, values):
        # Build one register dict from the captured hex strings.
        regs = {"eax_in": eax_in, "ecx_in": ecx_in}
        for key, val in zip(("eax", "ebx", "ecx", "edx"), values):
            regs[key] = int(val, 0)
        return regs

    collected = []
    current_eax_in = 0
    for row in output.split("\n"):
        head = branch_pattern_head.match(row)
        if head:
            # Remember the leaf number for the subleaf lines below.
            current_eax_in = int(head.group(1), 0)
            continue
        body = branch_pattern_body.match(row)
        if body:
            collected.append(as_regs(current_eax_in,
                                     int(body.group(1), 0),
                                     body.groups()[1:]))
            continue
        plain = leave_pattern.match(row)
        if plain:
            collected.append(as_regs(int(plain.group(1), 0), 0,
                                     plain.groups()[1:]))
    return collected
def gather_cpuid_leaves(args):
    """Run the cpuid/kcpuid utility and yield sanitized register dicts.

    The fields carrying the local APIC id are masked so that the dump
    always looks like it was taken on logical processor #0.
    """
    def scrub(regs, eax_in, ecx_in, eax_mask, ebx_mask, ecx_mask, edx_mask):
        # Apply the masks only to the leaf identified by (eax_in, ecx_in).
        if regs["eax_in"] == eax_in and regs["ecx_in"] == ecx_in:
            regs["eax"] &= eax_mask
            regs["ebx"] &= ebx_mask
            regs["ecx"] &= ecx_mask
            regs["edx"] &= edx_mask

    tool = args.path_to_cpuid or "cpuid"
    # kcpuid uses "-r"; classic cpuid needs "-1r" to dump one CPU raw.
    flag = "-r" if "kcpuid" in tool else "-1r"
    try:
        output = subprocess.check_output([tool, flag],
                                         universal_newlines=True)
    except FileNotFoundError as e:
        exit("Error: '{}' not found.\n'cpuid' can be usually found in a "
             "package named identically. If your distro does not provide such "
             "package, you can find the sources or binary packages at "
             "'http://www.etallen.com/cpuid.html'.".format(e.filename))

    # kcpuid output contains "=====" separators; classic cpuid does not.
    if "=====" in output:
        reglist = gather_cpuid_leaves_kcpuid(output)
    else:
        reglist = gather_cpuid_leaves_cpuid(output)
    for regs in reglist:
        # local apic id. Pretend to always run on logical processor #0.
        scrub(regs, 0x01, 0x00, 0xffffffff, 0x00ffffff, 0xffffffff, 0xffffffff)
        scrub(regs, 0x0b, 0x00, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00)
        scrub(regs, 0x0b, 0x01, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00)
        yield regs
def gather_msr():
    """Read selected MSRs, either directly or through KVM.

    Returns a (description, values) tuple: description is "" for direct
    reads, " via KVM" for the ioctl fallback, and None when neither
    interface is usable (values is then empty).
    """
    addresses = [
        0x10a,  # IA32_ARCH_CAPABILITIES_MSR
        0xcf,   # IA32_CORE_CAPABILITY_MSR
    ]
    KVM_GET_MSRS = 0xc008ae88
    msrs = dict()

    # Preferred path: the msr kernel module exposes raw registers per CPU.
    try:
        with open("/dev/cpu/0/msr", "rb") as f:
            for addr in addresses:
                f.seek(addr)
                msrs[addr] = struct.unpack("=Q", f.read(8))[0]
            return "", msrs
    except IOError as e:
        print("Warning: {}".format(e), file=sys.stderr)

    # Fallback: ask KVM for the MSR values via its ioctl interface.
    try:
        with open("/dev/kvm", "rb") as f:
            for addr in addresses:
                request = struct.pack("=LLLLQ", 1, 0, addr, 0, 0)
                reply = fcntl.ioctl(f, KVM_GET_MSRS, request)
                msrs[addr] = struct.unpack("=LLLLQ", reply)[4]
            return " via KVM", msrs
    except IOError as e:
        print("Warning: {}".format(e), file=sys.stderr)

    return None, {}
def call_qemu(qemu, qmp_cmds):
    """Run *qemu* with a QMP monitor on stdio, feed it *qmp_cmds*.

    Yields every interesting QMP response; empty "return" acks and the
    SHUTDOWN event are filtered out.  Exits on qemu failure.
    """
    argv = [
        qemu,
        "-machine", "accel=kvm",
        "-cpu", "host",
        "-nodefaults",
        "-nographic",
        "-qmp", "stdio"]

    # Capability handshake, then the caller's commands, then quit.
    script = ["{\"execute\": \"qmp_capabilities\"}"]
    script.extend(json.dumps(o) for o in qmp_cmds)
    script.append("{\"execute\": \"quit\"}")

    try:
        output = subprocess.check_output(
            argv,
            universal_newlines=True,
            input="\n".join(script))
    except subprocess.CalledProcessError:
        exit("Error: Non-zero exit code from '{}'.".format(qemu))
    except FileNotFoundError:
        exit("Error: File not found: '{}'.".format(qemu))

    for line in output.split("\n"):
        if not line:
            continue
        response = json.loads(line)
        if "return" in response and not response["return"]:
            continue
        if response.get("event") == "SHUTDOWN":
            continue
        yield response
def gather_model(args):
    """Query QEMU over QMP for a description of the host CPU model.

    First tries query-cpu-model-expansion (modern QEMU); if that yields
    a static model, expands it fully.  Otherwise falls back to reading
    individual qom properties of the first CPU device.  Returns the
    generator produced by call_qemu().
    """
    output = call_qemu(args.path_to_qemu, [
        {
            "execute": "query-cpu-model-expansion",
            "arguments":
            {
                "type": "static",
                "model": {"name": "host"}
            },
            "id": "model-expansion"
        }])
    static_model = None
    for o in output:
        if o.get("id") == "model-expansion":
            static_model = o["return"]["model"]
    if static_model:
        # Modern path: expand the static model to the full property list.
        return call_qemu(args.path_to_qemu, [
            {
                "execute": "query-cpu-model-expansion",
                "arguments":
                {
                    "type": "full",
                    "model": static_model
                },
                "id": "model-expansion"
            },
            {
                "execute": "query-cpu-definitions",
                "id": "definitions"
            }
        ])
    else:
        # Fallback path: read the CPU properties one by one via qom-get.
        return call_qemu(args.path_to_qemu, [
            {
                "execute": "qom-get",
                "arguments":
                {
                    "path": "/machine/unattached/device[0]",
                    "property": "feature-words"
                },
                "id": "feature-words"
            },
            {
                "execute": "qom-get",
                "arguments":
                {
                    "path": "/machine/unattached/device[0]",
                    "property": "family"
                },
                "id": "family"
            },
            {
                "execute": "qom-get",
                "arguments":
                {
                    "path": "/machine/unattached/device[0]",
                    "property": "model"
                },
                "id": "model"
            },
            {
                "execute": "qom-get",
                "arguments":
                {
                    "path": "/machine/unattached/device[0]",
                    "property": "stepping"
                },
                "id": "stepping"
            },
            {
                "execute": "qom-get",
                "arguments":
                {
                    "path": "/machine/unattached/device[0]",
                    "property": "model-id"
                },
                "id": "model-id"
            },
            {
                "execute": "query-cpu-definitions",
                "id": "definitions"
            }
        ])
def gather(args):
    """Collect all host CPU data: model name, cpuid leaves, MSR values
    and the QEMU model description."""
    name = gather_name(args)
    leaves = list(gather_cpuid_leaves(args))
    via, msr = gather_msr()
    model = list(gather_model(args))
    return {"name": name, "leaves": leaves, "via": via, "msr": msr,
            "model": model}
def parse_filename(data):
    """Derive a test-data file name (without extension) from the CPU
    model name, stripping marketing decorations and frequency info."""
    cleanups = (
        ("[ -]+ +", " "),                  # collapse space/dash runs
        ("\\(([Rr]|[Tt][Mm])\\)", ""),     # drop (R)/(TM) marks
        (".*(Intel|AMD) ", ""),            # drop everything up to the vendor
        (" (Duo|Quad|II X[0-9]+)", " "),   # drop core-count infix
        (" (CPU|[Pp]rocessor)", ""),       # drop generic suffix
        (" @.*", ""),                      # drop clock frequency
        (" APU .*", ""),                   # drop APU suffix
        (" SE$", ""),                      # drop special-edition mark
        (" ", "-"),                        # spaces to dashes
    )
    name = data["name"].strip()
    for pattern, replacement in cleanups:
        name = re.sub(pattern, replacement, name)
    return "x86_64-cpuid-{}".format(name)
def output_xml(data, filename):
    """Write cpuid leaves and MSR values as a cpudata XML file.

    The written file name is printed for progress reporting.
    """
    cpuid_fmt = (
        " <cpuid"
        " eax_in='0x{0[eax_in]:08x}'"
        " ecx_in='0x{0[ecx_in]:02x}'"
        " eax='0x{0[eax]:08x}'"
        " ebx='0x{0[ebx]:08x}'"
        " ecx='0x{0[ecx]:08x}'"
        " edx='0x{0[edx]:08x}'"
        "/>\n")
    msr_fmt = " <msr index='0x{:x}' edx='0x{:08x}' eax='0x{:08x}'/>\n"

    print(filename)
    with open(filename, "wt") as f:
        f.write("<!-- {} -->\n".format(data["name"]))
        f.write("<cpudata arch='x86'>\n")
        for leaf in data["leaves"]:
            f.write(cpuid_fmt.format(leaf))
        for index, value in sorted(data["msr"].items()):
            # Split the 64-bit MSR value into its edx:eax halves.
            f.write(msr_fmt.format(int(index),
                                   0xffffffff & (value >> 32),
                                   0xffffffff & (value >> 0)))
        f.write("</cpudata>\n")
def output_json(data, filename):
    """Write the interesting QMP replies of data["model"] as JSON.

    Handshake banners, timestamped events and empty returns are
    dropped.  Nothing is written when no interesting reply remains;
    exits when the mandatory model-expansion reply is missing.
    """
    def interesting(reply):
        if "QMP" in reply:
            return False
        if "timestamp" in reply:
            return False
        if "return" in reply and not reply["return"]:
            return False
        return True

    replies = [reply for reply in data["model"] if interesting(reply)]
    if not replies:
        return
    if "model-expansion" not in [reply.get("id") for reply in replies]:
        exit(
            "Error: Missing query-cpu-model-expansion reply in "
            "{}".format(filename))

    print(filename)
    with open(filename, "wt") as f:
        for index, reply in enumerate(replies):
            # Separate consecutive JSON documents by a blank line.
            if index > 0:
                f.write("\n")
            json.dump(reply, f, indent=2)
            f.write("\n")
def parse(args, data):
    """Write the XML and JSON test files for *data*.

    JSON files that were actually written (and are non-empty) are
    queued on args.json_files for the later diff step.
    """
    base = parse_filename(data)
    xml_path = "{}.xml".format(base)
    json_path = "{}.json".format(base)

    output_xml(data, xml_path)
    output_json(data, json_path)

    # output_json() may decide not to write anything.
    if not os.path.isfile(json_path):
        return
    if os.path.getsize(json_path) == 0:
        return

    args.json_files = getattr(args, "json_files", list()) + [json_path]
def checkFeature(cpuData, feature):
    """Return True when *cpuData* contains all bits of *feature*.

    Walks the nested dict along the feature's type and key attributes,
    then requires at least one non-empty register whose bits are fully
    present in the data.
    """
    node = cpuData
    for key in ["type"] + _KEYS.get(feature["type"], list()):
        if feature[key] not in node:
            return False
        node = node[feature[key]]

    for reg in _REGS.get(feature["type"], list()):
        bits = feature[reg]
        # A register matches when it is non-empty and fully contained.
        if bits > 0 and bits == bits & node[reg]:
            return True
    return False
def addFeature(cpuData, feature):
    """Merge *feature*'s register bits into the nested *cpuData* dict."""
    node = cpuData
    for key in ["type"] + _KEYS.get(feature["type"], list()):
        node = node.setdefault(feature[key], dict())
    for reg in _REGS.get(feature["type"], list()):
        node[reg] = node.get(reg, 0) | feature[reg]
def parseQemu(path, features):
    """Build a cpuData dict from the enabled props of a QMP JSON file."""
    cpuData = {}
    with open(path, "r") as f:
        # raw_decode tolerates trailing documents after the first one.
        data, _ = json.JSONDecoder().raw_decode(f.read())

    for prop, enabled in data["return"]["model"]["props"].items():
        if enabled and prop in features:
            addFeature(cpuData, features[prop])
    return cpuData
def parseCPUData(path):
    """Parse a cpudata XML file into the nested feature-bit dict."""
    cpuData = dict()
    for element in xml.etree.ElementTree.parse(path).getroot():
        if element.tag not in ("cpuid", "msr"):
            continue
        feature = {"type": element.tag}
        # Missing attributes default to zero.
        for attr in _KEYS[element.tag] + _REGS[element.tag]:
            feature[attr] = int(element.attrib.get(attr, "0"), 0)
        addFeature(cpuData, feature)
    return cpuData
def parseMap():
    """Load libvirt's x86 feature map, keyed by feature name.

    The map is located relative to this script inside the libvirt
    source tree.
    """
    path = os.path.dirname(sys.argv[0])
    path = os.path.join(path, "..", "..", "src", "cpu_map", "x86_features.xml")

    cpuMap = dict()
    for feature_el in xml.etree.ElementTree.parse(path).getroot().iter("feature"):
        # Only the first child of a <feature> carries the register data.
        data_el = feature_el[0]
        if data_el.tag not in ("cpuid", "msr"):
            continue
        feature = {"type": data_el.tag}
        for attr in _KEYS[data_el.tag] + _REGS[data_el.tag]:
            feature[attr] = int(data_el.attrib.get(attr, "0"), 0)
        cpuMap[feature_el.attrib["name"]] = feature
    return cpuMap
def formatCPUData(cpuData, path, comment):
    """Write *cpuData* (nested feature-bit dict) to *path* as cpudata XML.

    Parameters:
    - `cpuData`: dict as built by addFeature(); may lack the "cpuid"
      and/or "msr" sections entirely
    - `path`: output file name (printed for progress reporting)
    - `comment`: text for the leading XML comment
    """
    print(path)
    with open(path, "w") as f:
        f.write("<!-- " + comment + " -->\n")
        f.write("<cpudata arch='x86'>\n")

        # An empty diff may contain no cpuid leaves at all; previously
        # this raised KeyError on a missing "cpuid" key.
        cpuid = cpuData.get("cpuid", {})
        for eax_in in sorted(cpuid.keys()):
            for ecx_in in sorted(cpuid[eax_in].keys()):
                leaf = cpuid[eax_in][ecx_in]
                line = (" <cpuid eax_in='0x%08x' ecx_in='0x%02x' "
                        "eax='0x%08x' ebx='0x%08x' "
                        "ecx='0x%08x' edx='0x%08x'/>\n")
                f.write(line % (
                    eax_in, ecx_in,
                    leaf["eax"], leaf["ebx"], leaf["ecx"], leaf["edx"]))

        if "msr" in cpuData:
            msr = cpuData["msr"]
            for index in sorted(msr.keys()):
                f.write(" <msr index='0x%x' edx='0x%08x' eax='0x%08x'/>\n" %
                        (index, msr[index]['edx'], msr[index]['eax']))

        f.write("</cpudata>\n")
def diff(args):
    """For every gathered JSON file, compare QEMU's enabled features
    against the corresponding XML dump and write the results to
    *-enabled.xml / *-disabled.xml files."""
    feature_map = parseMap()

    for json_path in args.json_files:
        xml_path = json_path.replace(".json", ".xml")
        host_data = parseCPUData(xml_path)
        qemu_data = parseQemu(json_path, feature_map)

        enabled = dict()
        disabled = dict()
        for feature in feature_map.values():
            # Enabled by QEMU wins; otherwise note host-only features.
            if checkFeature(qemu_data, feature):
                addFeature(enabled, feature)
            elif checkFeature(host_data, feature):
                addFeature(disabled, feature)

        formatCPUData(enabled, json_path.replace(".json", "-enabled.xml"),
                      "Features enabled by QEMU")
        formatCPUData(disabled, json_path.replace(".json", "-disabled.xml"),
                      "Features disabled by QEMU")
def main():
    """Command-line entry point: gather, parse and/or diff CPU data."""
    parser = argparse.ArgumentParser(description="Gather cpu test data")
    parser.add_argument(
        "--name",
        help="CPU model name. "
        "If unset, model name is read from '/proc/cpuinfo'.")
    parser.add_argument(
        "--path-to-cpuid",
        metavar="PATH",
        help="Path to 'cpuid' utility. "
        "If unset, the first executable 'cpuid' in $PATH is used.")
    parser.add_argument(
        "--path-to-qemu",
        metavar="PATH",
        help="Path to qemu. "
        "If unset, will try '/usr/bin/qemu-system-x86_64', "
        "'/usr/bin/qemu-kvm', and '/usr/libexec/qemu-kvm'.")
    subparsers = parser.add_subparsers(dest="action")
    subparsers.add_parser(
        "gather",
        help="Acquire data on target system and outputs to stdout. "
        "This is the default. ")
    subparsers.add_parser(
        "parse",
        help="Reads data from stdin and parses data for libvirt use.")
    subparsers.add_parser(
        "full",
        help="Equivalent to `cpu-data.py gather | cpu-data.py parse`.")
    diffparser = subparsers.add_parser(
        "diff",
        help="Diff json description of CPU model against known features.")
    diffparser.add_argument(
        "json_files",
        nargs="+",
        metavar="FILE",
        type=os.path.realpath,
        help="Path to one or more json CPU model descriptions.")
    args = parser.parse_args()
    # "gather" is the default action when no subcommand was given.
    if not args.action:
        args.action = "gather"
    # Pick the first existing qemu binary from the usual install paths.
    if not args.path_to_qemu:
        args.path_to_qemu = "qemu-system-x86_64"
        search = [
            "/usr/bin/qemu-system-x86_64",
            "/usr/bin/qemu-kvm",
            "/usr/libexec/qemu-kvm"]
        for f in search:
            if os.path.isfile(f):
                args.path_to_qemu = f
    if args.action in ["gather", "full"]:
        data = gather(args)
        if args.action == "gather":
            json.dump(data, sys.stdout, indent=2)
    if args.action in ["parse", "full"]:
        if args.action == "parse":
            data = json.load(sys.stdin)
        parse(args, data)
    # parse() queues json_files; the "diff" action provides them directly.
    if "json_files" in args:
        diff(args)
| 0 |
# -*- coding: utf-8 -*-
class SessionHelper:
    """Login/logout helpers for the application under test.

    Wraps the application fixture's webdriver (``app.wd``) with
    session-management operations.
    """

    def __init__(self, app):
        self.app = app

    def login(self, username, password):
        """Open the home page and submit the login form."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_name("username").click()
        wd.find_element_by_name("username").clear()
        wd.find_element_by_name("username").send_keys(username)
        wd.find_element_by_name("password").click()
        wd.find_element_by_name("password").clear()
        wd.find_element_by_name("password").send_keys(password)
        wd.find_element_by_css_selector('input[type="submit"]').click()

    def logout(self):
        """Log the current user out via the Logout link."""
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()

    def is_logged_in(self):
        """Return True when a Logout link is present on the page."""
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0

    def is_logged_in_as(self, username):
        """Return True when *username* is the currently logged-in user."""
        # Removed an unused local webdriver reference.
        return self.get_logged_user() == username

    def get_logged_user(self):
        """Return the username shown in the login-info block."""
        wd = self.app.wd
        return wd.find_element_by_css_selector("td.login-info-left span").text

    def ensure_logout(self):
        """Log out if any user is currently logged in."""
        # is_logged_in() already returns a boolean; no "> 0" needed.
        if self.is_logged_in():
            self.logout()

    def ensure_login(self, username, password):
        """Make sure *username* is logged in, re-logging if necessary."""
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                return
            self.logout()
        self.login(username, password)
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines some model classes related BGP.
These class include types used in saving information sent/received over BGP
sessions.
"""
import abc
from abc import ABCMeta
from abc import abstractmethod
from copy import copy
import logging
import netaddr
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RouteTargetMembershipNLRI
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES
from ryu.lib.packet.bgp import BGPPathAttributeLocalPref
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH
from ryu.services.protocols.bgp.base import OrderedDict
from ryu.services.protocols.bgp.constants import VPN_TABLE
from ryu.services.protocols.bgp.constants import VRF_TABLE
from ryu.services.protocols.bgp.model import OutgoingRoute
from ryu.services.protocols.bgp.processor import BPR_ONLY_PATH
from ryu.services.protocols.bgp.processor import BPR_UNKNOWN
LOG = logging.getLogger('bgpspeaker.info_base.base')
class Table(object):
    """A container for holding information about destination/prefixes.

    Routing information base for a particular afi/safi.
    This is a base class which should be sub-classed for different route
    family. A table can be uniquely identified by (Route Family, Scope Id).
    """
    # Python 2 style abstract base class declaration.
    __metaclass__ = abc.ABCMeta
    ROUTE_FAMILY = RF_IPv4_UC

    def __init__(self, scope_id, core_service, signal_bus):
        # Destinations keyed by the value returned by _table_key(nlri).
        self._destinations = dict()
        # Scope in which this table exists.
        # If this table represents the VRF, then this could be a VPN ID.
        # For global/VPN tables this should be None
        self._scope_id = scope_id
        self._signal_bus = signal_bus
        self._core_service = core_service

    @property
    def route_family(self):
        return self.__class__.ROUTE_FAMILY

    @property
    def core_service(self):
        return self._core_service

    @property
    def scope_id(self):
        return self._scope_id

    @abstractmethod
    def _create_dest(self, nlri):
        """Creates destination specific for this table.

        Returns destination that stores information of paths to *nlri*.
        """
        raise NotImplementedError()

    def itervalues(self):
        """Iterate over all destinations in this table (Python 2 style)."""
        return self._destinations.itervalues()

    def insert(self, path):
        """Insert a path (or a withdrawal) and return the updated
        destination."""
        self._validate_path(path)
        self._validate_nlri(path.nlri)
        if path.is_withdraw:
            updated_dest = self._insert_withdraw(path)
        else:
            updated_dest = self._insert_path(path)
        return updated_dest

    def insert_sent_route(self, sent_route):
        """Record that a route was advertised (Adj-RIB-Out bookkeeping)."""
        self._validate_path(sent_route.path)
        dest = self._get_or_create_dest(sent_route.path.nlri)
        dest.add_sent_route(sent_route)

    def _insert_path(self, path):
        """Add new path to destination identified by given prefix.
        """
        assert path.is_withdraw is False
        dest = self._get_or_create_dest(path.nlri)
        # Add given path to matching Dest.
        dest.add_new_path(path)
        # Return updated destination.
        return dest

    def _insert_withdraw(self, path):
        """Appends given path to withdraw list of Destination for given prefix.
        """
        assert path.is_withdraw is True
        dest = self._get_or_create_dest(path.nlri)
        # Add given path to matching destination.
        dest.add_withdraw(path)
        # Return updated destination.
        return dest

    def cleanup_paths_for_peer(self, peer):
        """Remove old paths from whose source is `peer`

        Old paths have source version number that is less than current peer
        version number. Also removes sent paths to this peer.
        """
        LOG.debug('Cleaning paths from table %s for peer %s', self, peer)
        for dest in self.itervalues():
            # Remove paths learned from this source
            paths_deleted = dest.remove_old_paths_from_source(peer)
            # Remove sent paths to this peer
            had_sent = dest.remove_sent_route(peer)
            if had_sent:
                LOG.debug('Removed sent route %s for %s', dest.nlri, peer)
            # If any paths are removed we enqueue respective destination for
            # future processing.
            if paths_deleted:
                self._signal_bus.dest_changed(dest)

    def clean_uninteresting_paths(self, interested_rts):
        """Cleans table of any path that do not have any RT in common
        with `interested_rts`.

        Parameters:
            - `interested_rts`: (set) of RT that are of interest/that need to
            be preserved
        """
        LOG.debug('Cleaning table %s for given interested RTs %s',
                  self, interested_rts)
        uninteresting_dest_count = 0
        for dest in self.itervalues():
            added_withdraw = \
                dest.withdraw_unintresting_paths(interested_rts)
            if added_withdraw:
                self._signal_bus.dest_changed(dest)
                uninteresting_dest_count += 1
        return uninteresting_dest_count

    def delete_dest_by_nlri(self, nlri):
        """Deletes the destination identified by given prefix.

        Returns the deleted destination if a match is found. If not match is
        found return None.
        """
        self._validate_nlri(nlri)
        dest = self._get_dest(nlri)
        if dest:
            # BUG FIX: the dict is keyed by _table_key(nlri), not by the
            # destination object; popping `dest` always raised KeyError.
            self._destinations.pop(self._table_key(nlri))
        return dest

    def delete_dest(self, dest):
        """Delete the given destination from this table."""
        del self._destinations[self._table_key(dest.nlri)]

    def _validate_nlri(self, nlri):
        """Validated *nlri* is the type that this table stores/supports.
        """
        if not nlri or not (nlri.ROUTE_FAMILY == self.route_family):
            raise ValueError('Invalid Vpnv4 prefix given.')

    def _validate_path(self, path):
        """Check if given path is an instance of *Path*.

        Raises ValueError if given is not a instance of *Path*.
        """
        if not path or not (path.route_family == self.route_family):
            raise ValueError('Invalid path. Expected instance of'
                             ' Vpnv4 route family path, got %s.' % path)

    def _get_or_create_dest(self, nlri):
        table_key = self._table_key(nlri)
        dest = self._destinations.get(table_key)
        # If destination for given prefix does not exist we create it.
        if dest is None:
            dest = self._create_dest(nlri)
            self._destinations[table_key] = dest
        return dest

    def _get_dest(self, nlri):
        """Return the destination for *nlri*, or None if not present."""
        table_key = self._table_key(nlri)
        dest = self._destinations.get(table_key)
        return dest

    def is_for_vrf(self):
        """Returns true if this table instance represents a VRF.
        """
        return self.scope_id is not None

    def __str__(self):
        return 'Table(scope_id: %s, rf: %s)' % (self.scope_id,
                                                self.route_family)

    @abstractmethod
    def _table_key(self, nlri):
        """Return a key that will uniquely identify this NLRI inside
        this table.
        """
        raise NotImplementedError()
class NonVrfPathProcessingMixin(object):
    """Mixin reacting to best-path selection algorithm on main table
    level. Intended to use with "Destination" subclasses.
    Applies to most of Destinations except for VrfDest
    because they are processed at VRF level, so different logic applies.

    NOTE: relies on attributes provided by the host Destination class
    (_best_path, _sent_routes, _known_path_list, _core_service).
    """
    def _best_path_lost(self):
        # The destination no longer has a best path.
        self._best_path = None
        if self._sent_routes:
            # We have to send update-withdraw to all peers to whom old best
            # path was sent.
            for sent_route in self._sent_routes.values():
                sent_path = sent_route.path
                withdraw_clone = sent_path.clone(for_withdrawal=True)
                outgoing_route = OutgoingRoute(withdraw_clone)
                sent_route.sent_peer.enque_outgoing_msg(outgoing_route)
                LOG.debug('Sending withdrawal to %s for %s',
                          sent_route.sent_peer, outgoing_route)
            # Have to clear sent_route list for this destination as
            # best path is removed.
            self._sent_routes = {}
    def _new_best_path(self, new_best_path):
        # Install the new best path and advertise/withdraw as needed.
        old_best_path = self._best_path
        self._best_path = new_best_path
        LOG.debug('New best path selected for destination %s', self)
        # If old best path was withdrawn
        if (old_best_path and old_best_path not in self._known_path_list
                and self._sent_routes):
            # Have to clear sent_route list for this destination as
            # best path is removed.
            self._sent_routes = {}
        # Communicate that we have new best path to all qualifying
        # bgp-peers.
        pm = self._core_service.peer_manager
        pm.comm_new_best_to_bgp_peers(new_best_path)
        # withdraw old best path
        if old_best_path and self._sent_routes:
            for sent_route in self._sent_routes.values():
                sent_path = sent_route.path
                withdraw_clone = sent_path.clone(for_withdrawal=True)
                outgoing_route = OutgoingRoute(withdraw_clone)
                sent_route.sent_peer.enque_outgoing_msg(outgoing_route)
                LOG.debug('Sending withdrawal to %s for %s',
                          sent_route.sent_peer, outgoing_route)
            self._sent_routes = {}
class Destination(object):
"""State about a particular destination.
For example, an IP prefix. This is the data-structure that is hung of the
a routing information base table *Table*.
"""
__metaclass__ = abc.ABCMeta
ROUTE_FAMILY = RF_IPv4_UC
def __init__(self, table, nlri):
# Validate arguments.
if table.route_family != self.__class__.ROUTE_FAMILY:
raise ValueError('Table and destination route family '
'do not match.')
# Back-pointer to the table that contains this destination.
self._table = table
self._core_service = table.core_service
self._nlri = nlri
# List of all known processed paths,
self._known_path_list = []
# List of new un-processed paths.
self._new_path_list = []
# Pointer to best-path. One from the the known paths.
self._best_path = None
# Reason current best path was chosen as best path.
self._best_path_reason = None
# List of withdrawn paths.
self._withdraw_list = []
# List of SentRoute objects. This is the Adj-Rib-Out for this
# destination. (key/value: peer/sent_route)
self._sent_routes = {}
# This is an (optional) list of paths that were created as a
# result of exporting this route to other tables.
# self.exported_paths = None
# Automatically generated
#
# On work queue for BGP processor.
# self.next_dest_to_process
# self.prev_dest_to_process
@property
def route_family(self):
return self.__class__.ROUTE_FAMILY
@property
def nlri(self):
return self._nlri
@property
def best_path(self):
return self._best_path
@property
def best_path_reason(self):
return self._best_path_reason
@property
def known_path_list(self):
return self._known_path_list[:]
@property
def sent_routes(self):
return self._sent_routes.values()
def add_new_path(self, new_path):
self._validate_path(new_path)
self._new_path_list.append(new_path)
def add_withdraw(self, withdraw):
self._validate_path(withdraw)
self._withdraw_list.append(withdraw)
def add_sent_route(self, sent_route):
self._sent_routes[sent_route.sent_peer] = sent_route
def remove_sent_route(self, peer):
if self.was_sent_to(peer):
del self._sent_routes[peer]
return True
return False
def was_sent_to(self, peer):
if peer in self._sent_routes.keys():
return True
return False
def _process(self):
"""Calculate best path for this destination.
A destination is processed when known paths to this destination has
changed. We might have new paths or withdrawals of last known paths.
Removes withdrawals and adds new learned paths from known path list.
Uses bgp best-path calculation algorithm on new list of known paths to
choose new best-path. Communicates best-path to core service.
"""
LOG.debug('Processing destination: %s', self)
new_best_path, reason = self._process_paths()
self._best_path_reason = reason
if self._best_path == new_best_path:
return
if new_best_path is None:
# we lost best path
assert not self._known_path_list, repr(self._known_path_list)
return self._best_path_lost()
else:
return self._new_best_path(new_best_path)
@abstractmethod
def _best_path_lost(self):
raise NotImplementedError()
@abstractmethod
def _new_best_path(self, new_best_path):
raise NotImplementedError()
@classmethod
def _validate_path(cls, path):
if not path or path.route_family != cls.ROUTE_FAMILY:
raise ValueError(
'Invalid path. Expected %s path got %s' %
(cls.ROUTE_FAMILY, path)
)
def process(self):
self._process()
if not self._known_path_list and not self._best_path:
self._remove_dest_from_table()
def _remove_dest_from_table(self):
self._table.delete_dest(self)
def remove_old_paths_from_source(self, source):
"""Removes known old paths from *source*.
Returns *True* if any of the known paths were found to be old and
removed/deleted.
"""
assert(source and hasattr(source, 'version_num'))
removed_paths = []
# Iterate over the paths in reverse order as we want to delete paths
# whose source is this peer.
source_ver_num = source.version_num
for path_idx in range(len(self._known_path_list) - 1, -1, -1):
path = self._known_path_list[path_idx]
if (path.source == source and
path.source_version_num < source_ver_num):
# If this peer is source of any paths, remove those path.
del(self._known_path_list[path_idx])
removed_paths.append(path)
return removed_paths
def withdraw_if_sent_to(self, peer):
"""Sends a withdraw for this destination to given `peer`.
Check the records if we indeed advertise this destination to given peer
and if so, creates a withdraw for advertised route and sends it to the
peer.
Parameter:
- `peer`: (Peer) peer to send withdraw to
"""
from ryu.services.protocols.bgp.peer import Peer
if not isinstance(peer, Peer):
raise TypeError('Currently we only support sending withdrawal'
' to instance of peer')
sent_route = self._sent_routes.pop(peer, None)
if not sent_route:
return False
sent_path = sent_route.path
withdraw_clone = sent_path.clone(for_withdrawal=True)
outgoing_route = OutgoingRoute(withdraw_clone)
sent_route.sent_peer.enque_outgoing_msg(outgoing_route)
return True
def _process_paths(self):
"""Calculates best-path among known paths for this destination.
Returns:
- Best path
Modifies destination's state related to stored paths. Removes withdrawn
paths from known paths. Also, adds new paths to known paths.
"""
# First remove the withdrawn paths.
# Note: If we want to support multiple paths per destination we may
# have to maintain sent-routes per path.
self._remove_withdrawals()
# Have to select best-path from available paths and new paths.
# If we do not have any paths, then we no longer have best path.
if not self._known_path_list and len(self._new_path_list) == 1:
# If we do not have any old but one new path
# it becomes best path.
self._known_path_list.append(self._new_path_list[0])
del(self._new_path_list[0])
return self._known_path_list[0], BPR_ONLY_PATH
# If we have a new version of old/known path we use it and delete old
# one.
self._remove_old_paths()
# Collect all new paths into known paths.
self._known_path_list.extend(self._new_path_list)
# Clear new paths as we copied them.
del(self._new_path_list[:])
# If we do not have any paths to this destination, then we do not have
# new best path.
if not self._known_path_list:
return None, BPR_UNKNOWN
# Compute new best path
current_best_path, reason = self._compute_best_known_path()
return current_best_path, reason
def _remove_withdrawals(self):
"""Removes withdrawn paths.
Note:
We may have disproportionate number of withdraws compared to know paths
since not all paths get installed into the table due to bgp policy and
we can receive withdraws for such paths and withdrawals may not be
stopped by the same policies.
"""
LOG.debug('Removing %s withdrawals', len(self._withdraw_list))
# If we have no withdrawals, we have nothing to do.
if not self._withdraw_list:
return
# If we have some withdrawals and no know-paths, it means it is safe to
# delete these withdraws.
if not self._known_path_list:
LOG.debug('Found %s withdrawals for path(s) that did not get'
' installed.', len(self._withdraw_list))
del(self._withdraw_list[:])
return
# If we have some known paths and some withdrawals, we find matches and
# delete them first.
matches = set()
w_matches = set()
# Match all withdrawals from destination paths.
for withdraw in self._withdraw_list:
match = None
for path in self._known_path_list:
# We have a match if the source are same.
if path.source == withdraw.source:
match = path
matches.add(path)
w_matches.add(withdraw)
# One withdraw can remove only one path.
break
# We do no have any match for this withdraw.
if not match:
LOG.debug('No matching path for withdraw found, may be path '
'was not installed into table: %s',
withdraw)
# If we have partial match.
if len(matches) != len(self._withdraw_list):
LOG.debug('Did not find match for some withdrawals. Number of '
'matches(%s), number of withdrawals (%s)',
len(matches), len(self._withdraw_list))
# Clear matching paths and withdrawals.
for match in matches:
self._known_path_list.remove(match)
for w_match in w_matches:
self._withdraw_list.remove(w_match)
def _remove_old_paths(self):
"""Identifies which of known paths are old and removes them.
Known paths will no longer have paths whose new version is present in
new paths.
"""
new_paths = self._new_path_list
known_paths = self._known_path_list
for new_path in new_paths:
old_paths = []
for path in known_paths:
# Here we just check if source is same and not check if path
# version num. as new_paths are implicit withdrawal of old
# paths and when doing RouteRefresh (not EnhancedRouteRefresh)
# we get same paths again.
if new_path.source == path.source:
old_paths.append(path)
break
for old_path in old_paths:
known_paths.remove(old_path)
LOG.debug('Implicit withdrawal of old path, since we have'
' learned new path from same source: %s', old_path)
def _compute_best_known_path(self):
"""Computes the best path among known paths.
Returns current best path among `known_paths`.
"""
if not self._known_path_list:
from ryu.services.protocols.bgp.processor import BgpProcessorError
raise BgpProcessorError(desc='Need at-least one known path to'
' compute best path')
# We pick the first path as current best path. This helps in breaking
# tie between two new paths learned in one cycle for which best-path
# calculation steps lead to tie.
current_best_path = self._known_path_list[0]
best_path_reason = BPR_ONLY_PATH
for next_path in self._known_path_list[1:]:
from ryu.services.protocols.bgp.processor import compute_best_path
# Compare next path with current best path.
new_best_path, reason = \
compute_best_path(self._core_service.asn, current_best_path,
next_path)
best_path_reason = reason
if new_best_path is not None:
current_best_path = new_best_path
return current_best_path, best_path_reason
def withdraw_unintresting_paths(self, interested_rts):
"""Withdraws paths that are no longer interesting.
For all known paths that do not have any route target in common with
given `interested_rts` we add a corresponding withdraw.
Returns True if we added any withdraws.
"""
add_withdraws = False
for path in self._known_path_list:
if not path.has_rts_in(interested_rts):
self.withdraw_path(path)
add_withdraws = True
return add_withdraws
def withdraw_path(self, path):
if path not in self.known_path_list:
raise ValueError("Path not known, no need to withdraw")
withdraw = path.clone(for_withdrawal=True)
self._withdraw_list.append(withdraw)
def to_dict(self):
return {'table': str(self._table),
'nlri': str(self._nlri),
'paths': self._known_path_list[:],
'withdraws': self._get_num_withdraws()}
def __str__(self):
return ('Destination(table: %s, nlri: %s, paths: %s, withdraws: %s,'
' new paths: %s)' % (self._table, str(self._nlri),
len(self._known_path_list),
len(self._withdraw_list),
len(self._new_path_list)))
    def _get_num_valid_paths(self):
        # Number of currently known paths for this destination.
        return len(self._known_path_list)
    def _get_num_withdraws(self):
        # Number of withdraw paths queued in _withdraw_list.
        return len(self._withdraw_list)
def sent_routes_by_peer(self, peer):
"""get sent routes corresponding to specified peer.
Returns SentRoute list.
"""
result = []
for route in self._sent_routes.values():
if route.sent_peer == peer:
result.append(route)
return result
class Path(object):
    """Represents a way of reaching an IP destination.

    Also contains other meta-data given to us by a specific source (such as a
    peer).
    """
    __metaclass__ = ABCMeta
    # __slots__ keeps per-path memory low; any new attribute must also be
    # listed here.
    __slots__ = ('_source', '_path_attr_map', '_nlri', '_source_version_num',
                 '_exported_from', '_nexthop', 'next_path', 'prev_path',
                 '_is_withdraw', 'med_set_by_target_neighbor')
    # Route family this Path subclass is tied to; __init__ rejects NLRI of a
    # different family.
    ROUTE_FAMILY = RF_IPv4_UC

    def __init__(self, source, nlri, src_ver_num, pattrs=None, nexthop=None,
                 is_withdraw=False, med_set_by_target_neighbor=False):
        """Initializes Ipv4 path.

        If this path is not a withdraw, then path attribute and nexthop both
        should be provided.

        Parameters:
            - `source`: (Peer/str) source of this path.
            - `nlri`: (Vpnv4) Nlri instance for Vpnv4 route family.
            - `src_ver_num`: (int) version number of *source* when this path
              was learned.
            - `pattrs`: (OrderedDict) various path attributes for this path.
            - `nexthop`: (str) nexthop advertised for this path.
            - `is_withdraw`: (bool) True if this represents a withdrawal.
            - `med_set_by_target_neighbor`: (bool) flag consulted during
              best-path computation (see processor module).
        """
        self.med_set_by_target_neighbor = med_set_by_target_neighbor
        if nlri.ROUTE_FAMILY != self.__class__.ROUTE_FAMILY:
            raise ValueError('NLRI and Path route families do not'
                             ' match (%s, %s).' %
                             (nlri.ROUTE_FAMILY, self.__class__.ROUTE_FAMILY))
        # Currently paths injected directly into VRF has only one source
        # src_peer can be None to denote NC else has to be instance of Peer.
        # Paths can be exported from one VRF and then imported into another
        # VRF, in such cases it source is denoted as string VPN_TABLE.
        if not (source is None or
                hasattr(source, 'version_num') or
                source in (VRF_TABLE, VPN_TABLE)):
            raise ValueError('Invalid or Unsupported source for path: %s' %
                             source)
        # If this path is not a withdraw path, than it should have path-
        # attributes and nexthop.
        if not is_withdraw and not (pattrs and nexthop):
            raise ValueError('Need to provide nexthop and patattrs '
                             'for path that is not a withdraw.')
        # The entity (peer) that gave us this path.
        self._source = source
        # Path attribute of this path.
        # Copied defensively so later mutation of the caller's dict does not
        # leak into this path.
        if pattrs:
            self._path_attr_map = copy(pattrs)
        else:
            self._path_attr_map = OrderedDict()
        # NLRI that this path represents.
        self._nlri = nlri
        # If given nlri is withdrawn.
        self._is_withdraw = is_withdraw
        # @see Source.version_num
        self._source_version_num = src_ver_num
        self._nexthop = nexthop
        # Automatically generated.
        #
        # self.next_path
        # self.prev_path
        # The Destination from which this path was exported, if any.
        self._exported_from = None

    @property
    def source_version_num(self):
        # Version number of the source when this path was learned.
        return self._source_version_num

    @property
    def source(self):
        # Peer, VRF_TABLE/VPN_TABLE marker, or None (see __init__).
        return self._source

    @property
    def route_family(self):
        return self.__class__.ROUTE_FAMILY

    @property
    def nlri(self):
        return self._nlri

    @property
    def is_withdraw(self):
        return self._is_withdraw

    @property
    def pathattr_map(self):
        # Returns a copy so callers cannot mutate internal state.
        return copy(self._path_attr_map)

    @property
    def nexthop(self):
        return self._nexthop

    def get_pattr(self, pattr_type, default=None):
        """Returns path attribute of given type.

        Returns *default* if this path has no attribute of type *pattr_type*.
        """
        return self._path_attr_map.get(pattr_type, default)

    def clone(self, for_withdrawal=False):
        """Return a copy of this path; when cloning for withdrawal the path
        attributes are dropped (a withdraw needs none)."""
        pathattrs = None
        if not for_withdrawal:
            pathattrs = self.pathattr_map
        clone = self.__class__(
            self.source,
            self.nlri,
            self.source_version_num,
            pattrs=pathattrs,
            nexthop=self.nexthop,
            is_withdraw=for_withdrawal
        )
        return clone

    def get_rts(self):
        """Return the route-target list from the extended-communities
        attribute, or [] when the attribute is absent."""
        extcomm_attr = self._path_attr_map.get(
            BGP_ATTR_TYPE_EXTENDED_COMMUNITIES)
        if extcomm_attr is None:
            rts = []
        else:
            rts = extcomm_attr.rt_list
        return rts

    def has_rts_in(self, interested_rts):
        """Returns True if this `Path` has any `ExtCommunity` attribute
        route target common with `interested_rts`.
        """
        assert isinstance(interested_rts, set)
        curr_rts = self.get_rts()
        # Add default RT to path RTs so that we match interest for peers who
        # advertised default RT
        curr_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)
        return not interested_rts.isdisjoint(curr_rts)

    def __str__(self):
        return (
            'Path(source: %s, nlri: %s, source ver#: %s, '
            'path attrs.: %s, nexthop: %s, is_withdraw: %s)' %
            (
                self._source, self._nlri, self._source_version_num,
                self._path_attr_map, self._nexthop, self._is_withdraw
            )
        )

    def __repr__(self):
        return ('Path(%s, %s, %s, %s, %s, %s)' % (
            self._source, self._nlri, self._source_version_num,
            self._path_attr_map, self._nexthop, self._is_withdraw))
class Filter(object):
    """Represents a general filter for in-bound and out-bound filter

    ================ ==================================================
    Attribute        Description
    ================ ==================================================
    policy           Filter.POLICY_PERMIT or Filter.POLICY_DENY
    ================ ==================================================
    """
    __metaclass__ = ABCMeta
    # Route family this filter applies to.
    ROUTE_FAMILY = RF_IPv4_UC
    # DENY blocks a matched path; PERMIT lets it through. Subclasses define
    # additional policy values starting at 2 (see ASPathFilter).
    POLICY_DENY = 0
    POLICY_PERMIT = 1

    def __init__(self, policy=POLICY_DENY):
        self._policy = policy

    @property
    def policy(self):
        # Configured POLICY_* constant.
        return self._policy

    @abstractmethod
    def evaluate(self, path):
        """ This method evaluates the path.

        Returns this object's policy and the result of matching.
        If the specified prefix matches this object's prefix and
        ge and le condition,
        this method returns True as the matching result.

        ``path`` specifies the path. prefix must be string.
        """
        raise NotImplementedError()

    @abstractmethod
    def clone(self):
        """ This method clones Filter object.

        Returns Filter object that has the same values with the original one.
        """
        raise NotImplementedError()
class PrefixFilter(Filter):
    """Filter that matches paths against an IP prefix.

    Example::

        prefix_filter = PrefixFilter('10.5.111.0/24',
                                     policy=PrefixFilter.POLICY_PERMIT)

    ================ ==================================================
    Attribute        Description
    ================ ==================================================
    prefix           A prefix used for this filter
    policy           PrefixFilter.POLICY_PERMIT or PrefixFilter.POLICY_DENY
    ge               Lower bound (inclusive) on the matched prefix length.
    le               Upper bound (inclusive) on the matched prefix length.
    ================ ==================================================

    With ``PrefixFilter('10.5.111.0/24', policy=PrefixFilter.POLICY_DENY,
    ge=26, le=28)``, any prefix contained in 10.5.111.0/24 whose length is
    between 26 and 28 (e.g. 10.5.111.64/26, 10.5.111.32/27, 10.5.111.16/28)
    matches the filter. Used as an out-filter, POLICY_DENY stops the path
    from being sent to the neighbor; used as an in-filter, it stops the
    path from being imported into the global rib. POLICY_PERMIT lets the
    path through instead.
    """

    def __init__(self, prefix, policy, ge=None, le=None):
        super(PrefixFilter, self).__init__(policy)
        self._prefix = prefix
        self._network = netaddr.IPNetwork(prefix)
        self._ge = ge
        self._le = le

    def __cmp__(self, other):
        # Python 2 comparison hook: order filters by their prefix string.
        return cmp(self.prefix, other.prefix)

    def __repr__(self):
        policy = 'PERMIT' if self._policy == self.POLICY_PERMIT else 'DENY'
        return 'PrefixFilter(prefix=%s,policy=%s,ge=%s,le=%s)'\
            % (self._prefix, policy, self._ge, self._le)

    @property
    def prefix(self):
        # Prefix string as given at construction time.
        return self._prefix

    @property
    def policy(self):
        return self._policy

    @property
    def ge(self):
        return self._ge

    @property
    def le(self):
        return self._le

    def evaluate(self, path):
        """Match ``path``'s NLRI against this filter.

        Returns (policy, matched): ``matched`` is True when the NLRI prefix
        lies inside this filter's network and its length satisfies the
        ge/le bounds.
        """
        nlri = path.nlri
        length = nlri.length
        net = netaddr.IPNetwork(nlri.prefix)
        matched = False
        if net in self._network:
            lower, upper = self._ge, self._le
            # Truthiness checks (not `is None`) are kept deliberately to
            # reproduce the long-standing behavior for ge/le values of 0.
            if lower is None and upper is None:
                matched = True
            elif lower is None and upper:
                matched = length <= upper
            elif lower and upper is None:
                matched = lower <= length
            elif lower and upper:
                matched = lower <= length <= upper
        return self.policy, matched

    def clone(self):
        """Return a new PrefixFilter configured identically to this one."""
        return self.__class__(self.prefix,
                              policy=self._policy,
                              ge=self._ge,
                              le=self._le)
class ASPathFilter(Filter):
    """
    used to specify a prefix for AS_PATH attribute.

    We can create ASPathFilter object as follows;

    * as_path_filter = ASPathFilter(65000,policy=ASPathFilter.TOP)

    ================ ==================================================
    Attribute        Description
    ================ ==================================================
    as_number        A AS number used for this filter
    policy           ASPathFilter.POLICY_TOP and ASPathFilter.POLICY_END,
                     ASPathFilter.POLICY_INCLUDE and
                     ASPathFilter.POLICY_NOT_INCLUDE are available.
    ================ ==================================================

    Meaning of each policy is as follows;

    * POLICY_TOP :
      Filter checks if the specified AS number is at the top of
      AS_PATH attribute.
    * POLICY_END :
      Filter checks is the specified AS number
      is at the last of AS_PATH attribute.
    * POLICY_INCLUDE :
      Filter checks if specified AS number
      exists in AS_PATH attribute
    * POLICY_NOT_INCLUDE :
      opposite to POLICY_INCLUDE
    """
    # Policy values continue after Filter.POLICY_DENY/POLICY_PERMIT (0/1)
    # so they can share the inherited _policy attribute.
    POLICY_TOP = 2
    POLICY_END = 3
    POLICY_INCLUDE = 4
    POLICY_NOT_INCLUDE = 5

    def __init__(self, as_number, policy):
        super(ASPathFilter, self).__init__(policy)
        # AS number this filter matches against.
        self._as_number = as_number

    def __cmp__(self, other):
        # Python 2 comparison hook: order filters by AS number.
        return cmp(self.as_number, other.as_number)

    def __repr__(self):
        # 'TOP' is the fallback label for any policy not matched below.
        policy = 'TOP'
        if self._policy == self.POLICY_INCLUDE:
            policy = 'INCLUDE'
        elif self._policy == self.POLICY_NOT_INCLUDE:
            policy = 'NOT_INCLUDE'
        elif self._policy == self.POLICY_END:
            policy = 'END'
        return 'ASPathFilter(as_number=%s,policy=%s)'\
            % (self._as_number, policy)

    @property
    def as_number(self):
        return self._as_number

    @property
    def policy(self):
        return self._policy

    def evaluate(self, path):
        """ This method evaluates as_path list.

        Returns this object's policy and the result of matching.
        If the specified AS number matches this object's AS number
        according to the policy,
        this method returns True as the matching result.

        ``path`` specifies the path.
        """
        # NOTE(review): assumes the path carries an AS_PATH attribute; if it
        # is absent, `path_aspath` is None and the next line raises
        # AttributeError — confirm callers guarantee its presence.
        path_aspath = path.pathattr_map.get(BGP_ATTR_TYPE_AS_PATH)
        path_seg_list = path_aspath.path_seg_list
        # Only the first AS_PATH segment is inspected.
        if path_seg_list:
            path_seg = path_seg_list[0]
        else:
            path_seg = []
        result = False
        LOG.debug("path_seg : %s", path_seg)
        if self.policy == ASPathFilter.POLICY_TOP:
            if len(path_seg) > 0 and path_seg[0] == self._as_number:
                result = True
        elif self.policy == ASPathFilter.POLICY_INCLUDE:
            for aspath in path_seg:
                LOG.debug("POLICY_INCLUDE as_number : %s", aspath)
                if aspath == self._as_number:
                    result = True
                    break
        elif self.policy == ASPathFilter.POLICY_END:
            if len(path_seg) > 0 and path_seg[-1] == self._as_number:
                result = True
        elif self.policy == ASPathFilter.POLICY_NOT_INCLUDE:
            if self._as_number not in path_seg:
                result = True
        return self.policy, result

    def clone(self):
        """ This method clones ASPathFilter object.

        Returns ASPathFilter object that has the same values with the
        original one.
        """
        return self.__class__(self._as_number,
                              policy=self._policy)
class AttributeMap(object):
    """Applies a path attribute value to paths that match a set of filters.

    Example::

        pref_filter = PrefixFilter('192.168.103.0/30',
                                   PrefixFilter.POLICY_PERMIT)
        attribute_map = AttributeMap([pref_filter],
                                     AttributeMap.ATTR_LOCAL_PREF, 250)
        speaker.attribute_map_set('192.168.50.102', [attribute_map])

    With AttributeMap.ATTR_LOCAL_PREF, 250 is set as the local preference
    value whenever the nlri in the path matches pref_filter. ASPathFilter
    is also usable as a filter; it matches the path's AS_PATH attribute
    against an AS number.

    =================== ==================================================
    Attribute           Description
    =================== ==================================================
    filters             A list of filter.
                        Each object should be a Filter class or its sub-class
    attr_type           A type of attribute to map on filters. Currently
                        AttributeMap.ATTR_LOCAL_PREF is available.
    attr_value          A attribute value
    =================== ==================================================
    """
    # attr_type marker; also serves as the suffix of the getter looked up in
    # get_attribute() ('get' + '_local_pref' -> get_local_pref).
    ATTR_LOCAL_PREF = '_local_pref'

    def __init__(self, filters, attr_type, attr_value):
        assert all(isinstance(f, Filter) for f in filters),\
            'all the items in filters must be an instance of Filter sub-class'
        self.filters = filters
        self.attr_type = attr_type
        self.attr_value = attr_value

    def evaluate(self, path):
        """Run ``path`` through every filter, stopping at the first miss.

        Returns (cause, result) as reported by the last filter evaluated;
        result is True only if every filter matched.
        """
        cause, result = None, False
        for path_filter in self.filters:
            cause, result = path_filter.evaluate(path)
            if not result:
                break
        return cause, result

    def get_attribute(self):
        # Dispatch to the getter named after attr_type,
        # e.g. '_local_pref' -> get_local_pref().
        return getattr(self, 'get' + self.attr_type)()

    def get_local_pref(self):
        """Build a LOCAL_PREF path attribute carrying ``attr_value``."""
        return BGPPathAttributeLocalPref(value=self.attr_value)

    def clone(self):
        """Return a copy with cloned filters and the same type/value."""
        return self.__class__([f.clone() for f in self.filters],
                              self.attr_type, self.attr_value)

    def __repr__(self):
        attr_type = 'LOCAL_PREF'\
            if self.attr_type == self.ATTR_LOCAL_PREF else None
        filter_string = ','.join(repr(f) for f in self.filters)
        return 'AttributeMap(filters=[%s],attribute_type=%s,attribute_value=%s)'\
            % (filter_string, attr_type, self.attr_value)
| 0.000024 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: hpilo_facts
version_added: "2.3"
author: Dag Wieers (@dagwieers)
short_description: Gather facts through an HP iLO interface
description:
- This module gathers facts for a specific system using its HP iLO interface.
These facts include hardware and network related information useful
for provisioning (e.g. macaddress, uuid).
- This module requires the hpilo python module.
options:
host:
description:
- The HP iLO hostname/address that is linked to the physical system.
required: true
login:
description:
- The login name to authenticate to the HP iLO interface.
default: Administrator
password:
description:
- The password to authenticate to the HP iLO interface.
default: admin
ssl_version:
description:
- Change the ssl_version used.
default: TLSv1
choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
version_added: '2.4'
requirements:
- hpilo
notes:
- This module ought to be run from a system that can access the HP iLO
interface directly, either by using C(local_action) or using C(delegate_to).
'''
EXAMPLES = r'''
# Task to gather facts from a HP iLO interface only if the system is an HP server
- hpilo_facts:
host: YOUR_ILO_ADDRESS
login: YOUR_ILO_LOGIN
password: YOUR_ILO_PASSWORD
when: cmdb_hwmodel.startswith('HP ')
delegate_to: localhost
- fail:
msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ hw_system_serial }}) !'
when: cmdb_serialno != hw_system_serial
'''
RETURN = r'''
# Typical output of HP iLO_facts for a physical system
hw_bios_date:
description: BIOS date
returned: always
type: str
sample: 05/05/2011
hw_bios_version:
description: BIOS version
returned: always
type: str
sample: P68
hw_ethX:
description: Interface information (for each interface)
returned: always
type: dict
sample:
- macaddress: 00:11:22:33:44:55
macaddress_dash: 00-11-22-33-44-55
hw_eth_ilo:
description: Interface information (for the iLO network interface)
returned: always
type: dict
sample:
- macaddress: 00:11:22:33:44:BA
- macaddress_dash: 00-11-22-33-44-BA
hw_product_name:
description: Product name
returned: always
type: str
sample: ProLiant DL360 G7
hw_product_uuid:
description: Product UUID
returned: always
type: str
sample: ef50bac8-2845-40ff-81d9-675315501dac
hw_system_serial:
description: System serial number
returned: always
type: str
sample: ABC12345D6
hw_uuid:
description: Hardware UUID
returned: always
type: str
sample: 123456ABC78901D2
'''
import re
import traceback
import warnings
HPILO_IMP_ERR = None
try:
import hpilo
HAS_HPILO = True
except ImportError:
HPILO_IMP_ERR = traceback.format_exc()
HAS_HPILO = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
# Suppress warnings from hpilo
warnings.simplefilter('ignore')
def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
    """Turn a flat NIC record into a (fact_name, facts) pair.

    The fact name is ``hw_eth<port-1>`` when the record carries a numeric
    'Port' value; otherwise `non_numeric` is used (e.g. for the iLO NIC).
    The facts dict holds the MAC in colon- and dash-separated forms.
    """
    try:
        port_index = int(entry['Port']) - 1
    except Exception:
        factname = non_numeric
    else:
        factname = 'hw_eth%d' % port_index
    mac = entry['MAC']
    return (factname, {
        'macaddress': mac.replace('-', ':'),
        'macaddress_dash': mac,
    })
def main():
    """Ansible entry point: connect to the iLO, collect hardware and health
    facts, and finish via module.exit_json / fail_json."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', required=True),
            login=dict(type='str', default='Administrator'),
            password=dict(type='str', default='admin', no_log=True),
            ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
        ),
        supports_check_mode=True,
    )
    if not HAS_HPILO:
        module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
    host = module.params['host']
    login = module.params['login']
    password = module.params['password']
    # Map e.g. 'TLSv1_1' -> PROTOCOL_TLSv1_1 on the ssl module bundled with
    # hpilo ('V' -> 'v' restores the constant's casing).
    ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
    ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
    facts = {
        'module_hw': True,
    }
    # TODO: Count number of CPUs, DIMMs and total memory
    try:
        data = ilo.get_host_data()
    except hpilo.IloCommunicationError as e:
        module.fail_json(msg=to_native(e))
    # Walk the SMBIOS-style records; entry['type'] selects the record kind.
    for entry in data:
        if 'type' not in entry:
            continue
        elif entry['type'] == 0: # BIOS Information
            facts['hw_bios_version'] = entry['Family']
            facts['hw_bios_date'] = entry['Date']
        elif entry['type'] == 1: # System Information
            facts['hw_uuid'] = entry['UUID']
            facts['hw_system_serial'] = entry['Serial Number'].rstrip()
            facts['hw_product_name'] = entry['Product Name']
            facts['hw_product_uuid'] = entry['cUUID']
        elif entry['type'] == 209: # Embedded NIC MAC Assignment
            if 'fields' in entry:
                # `factname` set by a 'Port' field is deliberately carried
                # over to the following 'MAC' field of the same record.
                for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
                    if name.startswith('Port'):
                        try:
                            factname = 'hw_eth' + str(int(value) - 1)
                        except Exception:
                            factname = 'hw_eth_ilo'
                    elif name.startswith('MAC'):
                        facts[factname] = {
                            'macaddress': value.replace('-', ':'),
                            'macaddress_dash': value
                        }
            else:
                (factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
                facts[factname] = entry_facts
        elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
            # NOTE(review): unreachable — this duplicates the 209 check
            # above, so the branch can never execute; the intended record
            # type needs to be confirmed against the iLO host-data docs.
            for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
                if name.startswith('Port'):
                    try:
                        factname = 'hw_iscsi' + str(int(value) - 1)
                    except Exception:
                        factname = 'hw_iscsi_ilo'
                elif name.startswith('MAC'):
                    facts[factname] = {
                        'macaddress': value.replace('-', ':'),
                        'macaddress_dash': value
                    }
        elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
            (factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
            facts[factname] = entry_facts
    # Collect health (RAM/CPU data)
    health = ilo.get_embedded_health()
    facts['hw_health'] = health
    memory_details_summary = health.get('memory', {}).get('memory_details_summary')
    # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
    if memory_details_summary:
        facts['hw_memory_details_summary'] = memory_details_summary
        facts['hw_memory_total'] = 0
        for cpu, details in memory_details_summary.items():
            cpu_total_memory_size = details.get('total_memory_size')
            if cpu_total_memory_size:
                ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
                if ram:
                    # Only GB-denominated sizes are summed; other units are
                    # silently skipped.
                    if ram.group(2) == 'GB':
                        facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))
        # reformat into a text friendly format
        facts['hw_memory_total'] = "{0} GB".format(facts['hw_memory_total'])
    module.exit_json(ansible_facts=facts)
if __name__ == '__main__':
main()
| 0.002241 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from weblate.trans.models import SubProject, Project
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import cProfile
import pstats
class Command(BaseCommand):
    '''
    Imports a throw-away subproject under cProfile and prints the hottest
    calls, to benchmark project import.
    '''
    help = 'performs import benchmark'
    args = '<project> <repo> <mask>'
    option_list = BaseCommand.option_list + (
        make_option(
            '--profile-sort',
            type='str',
            dest='profile_sort',
            default='cumulative',
            help='sort order for profile stats',
        ),
        make_option(
            '--profile-count',
            type='int',
            dest='profile_count',
            default=20,
            help='number of profile stats to show',
        ),
    )

    def handle(self, *args, **options):
        if len(args) < 3:
            raise CommandError('Missing arguments!')
        project_slug, repo_url, mask = args[0], args[1], args[2]
        project = Project.objects.get(slug=project_slug)
        # Drop leftovers from any previous benchmark run.
        SubProject.objects.filter(
            project=project,
            slug='benchmark'
        ).delete()
        profiler = cProfile.Profile()
        # Creating the subproject triggers the repository import we want to
        # measure, so run the creation under the profiler.
        subproject = profiler.runcall(
            SubProject.objects.create,
            name='Benchmark',
            slug='benchmark',
            repo=repo_url,
            filemask=mask,
            project=project
        )
        stats = pstats.Stats(profiler)
        stats.sort_stats(options['profile_sort'])
        stats.print_stats(options['profile_count'])
        # Clean up the benchmark subproject after measuring.
        subproject.delete()
| 0 |
"""
Course API Serializers. Representing course catalog data
"""
import urllib
from django.core.urlresolvers import reverse
from rest_framework import serializers
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.lib.api.fields import AbsoluteURLField
class _MediaSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """
    Nested serializer to represent a media object.
    """
    def __init__(self, uri_attribute, *args, **kwargs):
        super(_MediaSerializer, self).__init__(*args, **kwargs)
        # Name of the course-overview attribute that holds this media's URI.
        self.uri_attribute = uri_attribute

    # source='*' passes the whole course-overview object to get_uri.
    uri = serializers.SerializerMethodField(source='*')

    def get_uri(self, course_overview):
        """
        Get the representation for the media resource's URI
        """
        return getattr(course_overview, self.uri_attribute)
class ImageSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """
    Collection of URLs pointing to images of various sizes.

    The URLs will be absolute URLs with the host set to the host of the current request. If the values to be
    serialized are already absolute URLs, they will be unchanged.
    """
    # Three renditions of the course image, made absolute against the
    # current request's host by AbsoluteURLField.
    raw = AbsoluteURLField()
    small = AbsoluteURLField()
    large = AbsoluteURLField()
class _CourseApiMediaCollectionSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """
    Nested serializer to represent a collection of media objects
    """
    # Each entry reads a different URI attribute off the course overview
    # (source='*' hands the whole object to the nested serializer).
    course_image = _MediaSerializer(source='*', uri_attribute='course_image_url')
    course_video = _MediaSerializer(source='*', uri_attribute='course_video_url')
    image = ImageSerializer(source='image_urls')
class CourseSerializer(serializers.Serializer):  # pylint: disable=abstract-method
    """
    Serializer for Course objects providing minimal data about the course.
    Compare this with CourseDetailSerializer.

    All fields below read directly off the CourseOverview object being
    serialized, except `blocks_url`, which is computed per request.
    """
    blocks_url = serializers.SerializerMethodField()
    effort = serializers.CharField()
    end = serializers.DateTimeField()
    enrollment_start = serializers.DateTimeField()
    enrollment_end = serializers.DateTimeField()
    id = serializers.CharField()  # pylint: disable=invalid-name
    media = _CourseApiMediaCollectionSerializer(source='*')
    name = serializers.CharField(source='display_name_with_default_escaped')
    number = serializers.CharField(source='display_number_with_default')
    org = serializers.CharField(source='display_org_with_default')
    short_description = serializers.CharField()
    start = serializers.DateTimeField()
    start_display = serializers.CharField()
    start_type = serializers.CharField()
    pacing = serializers.CharField()

    # 'course_id' is a deprecated field, please use 'id' instead.
    course_id = serializers.CharField(source='id', read_only=True)

    def get_blocks_url(self, course_overview):
        """
        Get the representation for SerializerMethodField `blocks_url`:
        an absolute URL to the blocks-in-course endpoint, with the course id
        passed as a query parameter.
        """
        # NOTE: urllib.urlencode is the Python 2 spelling (urllib.parse on 3).
        base_url = '?'.join([
            reverse('blocks_in_course'),
            urllib.urlencode({'course_id': course_overview.id}),
        ])
        return self.context['request'].build_absolute_uri(base_url)
class CourseDetailSerializer(CourseSerializer):  # pylint: disable=abstract-method
    """
    Serializer for Course objects providing additional details about the
    course.

    This serializer makes additional database accesses (to the modulestore) and
    returns more data (including 'overview' text). Therefore, for performance
    and bandwidth reasons, it is expected that this serializer is used only
    when serializing a single course, and not for serializing a list of
    courses.
    """
    overview = serializers.SerializerMethodField()

    def get_overview(self, course_overview):
        """
        Get the representation for SerializerMethodField `overview`
        """
        # Note: This makes a call to the modulestore, unlike the other
        # fields from CourseSerializer, which get their data
        # from the CourseOverview object in SQL.
        return CourseDetails.fetch_about_attribute(course_overview.id, 'overview')
| 0.002178 |
# SPDX-FileCopyrightText: 2013 SAP SE Srdjan Boskovic <srdjan.boskovic@sap.com>
#
# SPDX-License-Identifier: Apache-2.0
import inspect
import os
import sys
import subprocess
from codecs import open
from setuptools import setup, find_packages, Extension
MODULE_NAME = "pyrfc"
PYPIPACKAGE = "pyrfc"
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, "VERSION"), "rb", "utf-8") as version_file:
VERSION = version_file.read().strip()
with open(os.path.join(HERE, "README.md"), "rb", "utf-8") as readme_file:
LONG_DESCRIPTION = readme_file.read().strip()
BUILD_CYTHON = sys.platform.startswith("linux") or bool(os.getenv("PYRFC_BUILD_CYTHON"))
CMDCLASS = {}
if BUILD_CYTHON:
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
except ImportError:
sys.exit(
"Cython not installed: https://cython.readthedocs.io/en/latest/src/quickstart/install.html"
)
CMDCLASS = {"build_ext": build_ext}
# Check if SAP NWRFC SDK configured
SAPNWRFC_HOME = os.environ.get("SAPNWRFC_HOME")
if not SAPNWRFC_HOME:
sys.exit(
"Environment variable SAPNWRFC_HOME not set.\nPlease specify this variable with the root directory of the SAP NWRFC Library."
)
# https://launchpad.support.sap.com/#/notes/2573953
if sys.platform.startswith("linux"):
subprocess.call("./ci/utils/nwrfcsdk-version-linux.sh", shell=True)
LIBS = ["sapnwrfc", "sapucum"]
MACROS = [
("NDEBUG", None),
("_LARGEFILE_SOURCE", None),
("_CONSOLE", None),
("_FILE_OFFSET_BITS", 64),
("SAPonUNIX", None),
("SAPwithUNICODE", None),
("SAPwithTHREADS", None),
("SAPonLIN", None),
]
COMPILE_ARGS = [
"-Wall",
"-O2",
"-fexceptions",
"-funsigned-char",
"-fno-strict-aliasing",
"-Wall",
"-Wno-uninitialized",
"-Wno-deprecated-declarations",
"-Wno-unused-function",
"-Wcast-align",
"-fPIC",
"-pthread",
"-minline-all-stringops",
"-I{}/include".format(SAPNWRFC_HOME),
]
LINK_ARGS = ["-L{}/lib".format(SAPNWRFC_HOME)]
elif sys.platform.startswith("win"):
# https://docs.microsoft.com/en-us/cpp/build/reference/compiler-options-listed-alphabetically
# Python sources
PYTHONSOURCE = os.environ.get("PYTHONSOURCE")
if not PYTHONSOURCE:
PYTHONSOURCE = inspect.getfile(inspect).split("/inspect.py")[0]
# sys.exit('Environment variable PYTHONSOURCE not set. Please specify this variable with the root directory of the PYTHONSOURCE Library.')
subprocess.call("ci\\utils\\nwrfcsdk-version.bat", shell=True)
LIBS = ["sapnwrfc", "libsapucum"]
MACROS = [
("SAPonNT", None),
("_CRT_NON_CONFORMING_SWPRINTFS", None),
("_CRT_SECURE_NO_DEPRECATES", None),
("_CRT_NONSTDC_NO_DEPRECATE", None),
("_AFXDLL", None),
("WIN32", None),
("_WIN32_WINNT", "0x0502"),
("WIN64", None),
("_AMD64_", None),
("NDEBUG", None),
("SAPwithUNICODE", None),
("UNICODE", None),
("_UNICODE", None),
("SAPwithTHREADS", None),
("_ATL_ALLOW_CHAR_UNSIGNED", None),
("_LARGEFILE_SOURCE", None),
("_CONSOLE", None),
("SAP_PLATFORM_MAKENAME", "ntintel"),
]
COMPILE_ARGS = [
"-I{}\\include".format(SAPNWRFC_HOME),
"-I{}\\Include".format(PYTHONSOURCE),
"-I{}\\Include\\PC".format(PYTHONSOURCE),
"/EHs",
"/Gy",
"/J",
"/MD",
"/nologo",
"/W3",
"/Z7",
"/GL",
"/O2",
"/Oy-",
"/we4552",
"/we4700",
"/we4789",
]
LINK_ARGS = [
"-LIBPATH:{}\\lib".format(SAPNWRFC_HOME),
"-LIBPATH:{}\\PCbuild".format(PYTHONSOURCE),
"/NXCOMPAT",
"/STACK:0x2000000",
"/SWAPRUN:NET",
"/DEBUG",
"/OPT:REF",
"/DEBUGTYPE:CV,FIXUP",
"/MACHINE:amd64",
"/nologo",
"/LTCG",
]
elif sys.platform.startswith("darwin"):
subprocess.call("./ci/utils/nwrfcsdk-version-darwin.sh", shell=True)
MACOS_VERSION_MIN = "10.15"
LIBS = ["sapnwrfc", "sapucum"]
MACROS = [
("NDEBUG", None),
("_LARGEFILE_SOURCE", None),
("_CONSOLE", None),
("_FILE_OFFSET_BITS", 64),
("SAPonUNIX", None),
("SAPwithUNICODE", None),
("SAPwithTHREADS", None),
("SAPonDARW", None),
]
COMPILE_ARGS = [
"-Wall",
"-O2",
"-fexceptions",
"-funsigned-char",
"-fno-strict-aliasing",
"-Wno-uninitialized",
"-Wcast-align",
"-fPIC",
"-pthread",
"-minline-all-stringops",
"-isystem",
"-std=c++11",
"-mmacosx-version-min={}".format(MACOS_VERSION_MIN),
"-I{}/include".format(SAPNWRFC_HOME),
"-Wno-cast-align",
"-Wno-deprecated-declarations",
"-Wno-unused-function",
]
LINK_ARGS = [
"-L{}/lib".format(SAPNWRFC_HOME),
"-stdlib=libc++",
"-mmacosx-version-min={}".format(MACOS_VERSION_MIN),
# https://stackoverflow.com/questions/6638500/how-to-specify-rpath-in-a-makefile
"-Wl,-rpath,{}/lib".format(SAPNWRFC_HOME),
]
else:
sys.exit("Platform not supported: {}.".format(sys.platform))
# https://docs.python.org/2/distutils/apiref.html
PYRFC_EXT = Extension(
language="c++",
# https://stackoverflow.com/questions/8024805/cython-compiled-c-extension-importerror-dynamic-module-does-not-define-init-fu
name=f"{MODULE_NAME}.{MODULE_NAME}",
sources=[f"src/{MODULE_NAME}/_{MODULE_NAME}.pyx"],
define_macros=MACROS,
extra_compile_args=COMPILE_ARGS,
extra_link_args=LINK_ARGS,
libraries=LIBS,
)
# cf. http://docs.python.org/distutils/setupscript.html#additional-meta-data
setup(
name=PYPIPACKAGE,
version=VERSION,
description=("Python bindings for SAP NetWeaver RFC SDK"),
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
download_url="https://github.com/SAP/PyRFC/tarball/master",
classifiers=[ # cf. http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Cython",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
keywords=f"{MODULE_NAME} {PYPIPACKAGE} pyrfc sap rfc nwrfc sapnwrfc",
author="SAP SE",
url="https://github.com/SAP/pyrfc",
license="OSI Approved :: Apache Software License",
maintainer="Srdjan Boskovic",
maintainer_email="srdjan.boskovic@sap.com",
packages=find_packages(where="src", exclude=["*.cpp", "*.pxd", "*.html"]),
package_dir={"": "src"},
# http://packages.python.org/distribute/setuptools.html#setting-the-zip-safe-flag
zip_safe=False,
install_requires=["setuptools"],
setup_requires=["setuptools-git"],
cmdclass=CMDCLASS,
ext_modules=cythonize(PYRFC_EXT, annotate=True, language_level="3")
if BUILD_CYTHON
else [PYRFC_EXT],
test_suite=MODULE_NAME,
)
| 0.00053 |
from algorithms.array import selection
import sys, pygame, math
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
def plot(mi, q1, q2, q3, ma, outliers, ll, rl):
    """Render an interactive 3-D box plot with pygame/OpenGL.

    Arguments are the five-number summary (mi, q1, q2, q3, ma), the list of
    outlier values, and the lower/upper outlier limits (ll, rl). All values
    are normalized into the GL z-range [-0.95, 0.95] before drawing.
    Blocks forever in the render loop; closing the window exits the process.
    X/Y/Z keys toggle the rotation axes while held.
    """
    print("Min: ", mi,"\nQ1: " , q1, "\nQ2: ", q2, "\nQ3: ", q3, "\nMax: ", ma)
    print("Outlier Bounds: ", ll, rl)
    print("Outliers: ", outliers)
    # Extend the plotted range so the outlier limits are always inside it.
    mi = min(mi,ll)
    ma = max(ma,rl)
    if(ma - mi > 0):
        # Shift so the minimum becomes 0 ...
        q1 -= mi
        q2 -= mi
        q3 -= mi
        ma -= mi
        ll -= mi
        rl -= mi
        outliers = list(map(lambda x: x - mi, outliers))
        mi = 0
        # ... scale into [0, 1] ...
        q1 /= ma
        q2 /= ma
        q3 /= ma
        ll /= ma
        rl /= ma
        outliers = list(map(lambda x: x / ma, outliers))
        ma = 1
        # ... then map [0, 1] linearly onto [-0.95, 0.95] for GL coordinates.
        q1 = 1.9 * q1 -0.95
        q2 = 1.9 * q2 -0.95
        q3 = 1.9 * q3 -0.95
        mi = 1.9 * mi -0.95
        ma = 1.9 * ma -0.95
        ll = 1.9 * ll -0.95
        rl = 1.9 * rl -0.95
        outliers = list(map(lambda x: 1.9 * x - 0.95, outliers))
    else:
        # Degenerate data (all values equal): collapse everything to 0.
        q1 = q2 = q3 = mi = ma = 0
    pygame.init()
    display = (600, 600)
    pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
    clock = pygame.time.Clock()
    framesCount = 0  # NOTE(review): unused
    # a = degrees rotated per frame; x/y/z = axis flags toggled by key state.
    a, x, y, z = 1,0,1,0
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_x:
                    x = 1
                elif event.key == pygame.K_y:
                    y = 1
                elif event.key == pygame.K_z:
                    z = 1
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_x:
                    x = 0
                elif event.key == pygame.K_y:
                    y = 0
                elif event.key == pygame.K_z:
                    z = 0
        glRotatef(a,x,y,z)
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
        # Five horizontal squares: lower limit, Q1, median, Q3, upper limit.
        sq(genFlat(ll))
        sq(genFlat(q1))
        sq(genFlat(q2))
        sq(genFlat(q3))
        sq(genFlat(rl))
        # Four vertical edges of the Q1-Q3 box.
        sq((
            (-0.1,-0.1,q1),
            (-0.1,-0.1,q3),
        ))
        sq((
            (0.1,-0.1,q1),
            (0.1,-0.1,q3),
        ))
        sq((
            (-0.1,0.1,q1),
            (-0.1,0.1,q3),
        ))
        sq((
            (0.1,0.1,q1),
            (0.1,0.1,q3),
        ))
        # Whisker line through the centre from upper to lower limit.
        sq((
            (0,0,rl),
            (0,0,ll),
        ))
        # Small crosses mark the outliers.
        for i in outliers:
            sq(cross(i))
        pygame.display.flip()
        clock.tick(30)
def genFlat(a):
    """Return the four corners of a 0.2 x 0.2 horizontal square at height a."""
    return tuple((x, y, a) for x in (-0.1, 0.1) for y in (-0.1, 0.1))
def trans(v):
    """Swap the y and z components so data height maps to the GL y axis."""
    first, second, third = v[0], v[1], v[2]
    return (first, third, second)
def sq(verts):
    """Draw a GL line segment between every unordered pair of vertices.

    Each vertex is passed through trans() before being emitted, so the
    caller supplies (x, y, height) triples in data orientation.
    """
    glBegin(GL_LINES)
    for idx, later in enumerate(verts):
        for earlier in verts[:idx]:
            glVertex3fv(trans(later))
            glVertex3fv(trans(earlier))
    glEnd()
def cross(pos):
    """Return the six endpoints of a small 3-D cross centred at (0, 0, pos)."""
    d = 0.01  # half-size of each arm
    axial = ((0, 0, pos + d), (0, 0, pos - d))
    lateral = ((d, 0, pos), (-d, 0, pos), (0, d, pos), (0, -d, pos))
    return axial + lateral
def findMedian(a):
    """Partition *a* in place around its median and return (left, median, right).

    Uses selection.randomizedSelection to place the middle element(s) at
    their sorted positions. For even-length input the median is the mean of
    the two middle elements and `left`/`right` split at the midpoint.

    Bug fix: the selection rank was previously computed as len(a)/2, which
    is a float under Python 3; use integer floor division instead.
    """
    p = len(a) // 2  # index of the (upper) middle element
    selection.randomizedSelection(a, p)
    if len(a) % 2 == 1:
        return a[:p], a[p], a[p + 1:]
    # Even length: also place the element just left of the midpoint so the
    # two middle values can be averaged.
    selection.randomizedSelection(a, p - 1, r=p)
    return a[:p], (a[p - 1] + a[p]) / 2, a[p:]
def main():
    """Read integers from argv or stdin and plot them as a 3-D box plot.

    With command-line arguments, each argument is parsed as one integer.
    Otherwise stdin is scanned digit-by-digit: any run of digits forms a
    number (minus signs are ignored, so all values are non-negative).
    """
    data = []
    if len(sys.argv) > 1:
        data = list(map(int, sys.argv[1:]))
    else:
        inp = sys.stdin.read()
        tmp = 0
        # Bug fix: 'flag' was previously unbound until the first digit was
        # seen, so input starting with a non-digit raised UnboundLocalError.
        flag = False
        for i in inp:
            if i.isdigit():
                tmp = 10 * tmp + int(i)
                flag = True
            elif flag:
                data.append(tmp)
                tmp = 0
                flag = False
        # Bug fix: flush a trailing number that is not followed by a
        # delimiter (e.g. input without a final newline).
        if flag:
            data.append(tmp)
    # Quartile computation needs at least 4 values; repeat small samples.
    if len(data) < 4:
        data = 4 * data
    l, q2, r = findMedian(data)
    l, q1, _ = findMedian(l)
    _, q3, r = findMedian(r)
    mi = min(l)
    ma = max(r)
    iqr = q3 - q1
    # Tukey fences: values beyond 1.5 * IQR from the quartiles are outliers.
    rl, ll = q3 + 1.5 * iqr, q1 - 1.5 * iqr
    outliers = [i for i in data if i > rl or i < ll]
    plot(mi, q1, q2, q3, ma, outliers, ll, rl)


if __name__ == "__main__":
    main()
| 0.037355 |
import ew as ew_core
import ew.jinja2_ew as ew
from pylons import g, c
from allura import model as M
from allura.lib.security import Credentials
class ProjectSummary(ew_core.Widget):
    """Card-style summary widget for a single project.

    Widget options set through the admin UI arrive as unicode strings, so
    prepare_context coerces `columns` and the three show_* flags to their
    proper types before rendering.
    """
    template = 'jinja:allura:templates/widgets/project_summary.html'
    defaults = dict(
        ew_core.Widget.defaults,
        sitemap=None,
        icon=None,
        value=None,
        icon_url=None,
        accolades=None,
        columns=1,
        show_proj_icon=True,
        show_download_button=True,
        show_awards_banner=True,
        grid_view_tools='')

    def prepare_context(self, context):
        def parse_bool(val):
            # Coerce the usual truthy spellings of a unicode option to bool;
            # pass already-boolean values straight through.
            # (Fixes duplicated true_list boilerplate for the three flags.)
            if type(val) == unicode:
                return val.lower() in ('true', 't', '1', 'yes', 'y')
            return val

        response = super(ProjectSummary, self).prepare_context(context)
        value = response['value']
        if response['sitemap'] is None:
            # Only entries with a URL are displayable.
            response['sitemap'] = [s for s in value.sitemap() if s.url]
        if response['grid_view_tools'] != '':
            # Restrict the sitemap to the tool types requested for grid view.
            view_tools_list = response['grid_view_tools'].split(',')
            icon_tool_list = ["tool-%s" % vt.lower() for vt in view_tools_list]
            old_sitemap = response['sitemap']
            response['sitemap'] = [
                sm for sm in old_sitemap
                if sm.ui_icon is not None and sm.ui_icon.lower() in icon_tool_list]
        if response['icon_url'] is None:
            if value.icon:
                response['icon_url'] = value.url() + 'icon'
        if response['accolades'] is None:
            response['accolades'] = value.accolades
        if type(response['columns']) == unicode:
            response['columns'] = int(response['columns'])
        for opt in ('show_proj_icon', 'show_download_button',
                    'show_awards_banner'):
            response[opt] = parse_bool(response[opt])
        return response

    def resources(self):
        yield ew.JSLink('js/jquery.tools.min.js')
        # Attach jQuery Tools tooltips to award badges at page load.
        yield ew.JSScript('''
        $(document).ready(function () {
            var badges = $('small.badge');
            var i = badges.length;
            while (i) {
                i--;
                var tipHolder = document.createElement('div');
                tipHolder.id = "tip" + i;
                tipHolder.className = "tip";
                document.body.appendChild(tipHolder);
                $(badges[i]).parent('a[title]').tooltip({
                    tip: '#tip' + i,
                    opacity: '.9',
                    offset: [-10, 0]
                });
            }
        });
        ''')
class ProjectList(ew_core.Widget):
    """Widget rendering a list/grid of projects via ProjectSummary cards.

    Bulk-loads role and project metadata up front so rendering does not
    issue per-project queries. Unicode widget options are coerced to their
    proper types in prepare_context.
    """
    template = 'jinja:allura:templates/widgets/project_list_widget.html'
    defaults = dict(
        ew_core.Widget.defaults,
        projects=[],
        project_summary=ProjectSummary(),
        display_mode='list',
        sitemaps=None,
        icon_urls=None,
        accolades_index=None,
        columns=1,
        show_proj_icon=True,
        show_download_button=True,
        show_awards_banner=True,
        grid_view_tools='')

    def prepare_context(self, context):
        def parse_bool(val):
            # Coerce the usual truthy spellings of a unicode option to bool;
            # pass already-boolean values straight through.
            # (Fixes duplicated true_list boilerplate for the three flags.)
            if type(val) == unicode:
                return val.lower() in ('true', 't', '1', 'yes', 'y')
            return val

        response = super(ProjectList, self).prepare_context(context)
        cred = Credentials.get()
        projects = response['projects']
        # Prime the credential cache in bulk to avoid N+1 role lookups.
        cred.load_user_roles(c.user._id, *[p._id for p in projects])
        cred.load_project_roles(*[p._id for p in projects])
        if response['sitemaps'] is None:
            response['sitemaps'] = M.Project.menus(projects)
        if response['icon_urls'] is None:
            response['icon_urls'] = M.Project.icon_urls(projects)
        if response['accolades_index'] is None:
            response['accolades_index'] = M.Project.accolades_index(projects)
        if type(response['columns']) == unicode:
            response['columns'] = int(response['columns'])
        for opt in ('show_proj_icon', 'show_download_button',
                    'show_awards_banner'):
            response[opt] = parse_bool(response[opt])
        return response

    def resources(self):
        # Delegate to the per-project summary widget's resources.
        for r in self.project_summary.resources():
            yield r
class ProjectScreenshots(ew_core.Widget):
    """Widget rendering a project's screenshot gallery.

    `project` is the project whose screenshots are shown; `edit` switches
    the template into editing mode.
    """
    template = 'jinja:allura:templates/widgets/project_screenshots.html'
    defaults = dict(ew_core.Widget.defaults, project=None, edit=False)
| 0.002183 |
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron import manager
# Attribute Map
# Attribute map for the two resources this extension exposes. Each entry
# declares whether the attribute may be set on POST/PUT, whether it appears
# in API responses (is_visible), validation rules, and defaults.
RESOURCE_ATTRIBUTE_MAP = {
    'network_profiles': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:regex': attributes.UUID_PATTERN},
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'is_visible': True, 'default': ''},
        # e.g. VLAN or tunnel segmentation; immutable after creation
        'segment_type': {'allow_post': True, 'allow_put': False,
                         'is_visible': True, 'default': ''},
        'sub_type': {'allow_post': True, 'allow_put': False,
                     'is_visible': True,
                     'default': attributes.ATTR_NOT_SPECIFIED},
        'segment_range': {'allow_post': True, 'allow_put': True,
                          'is_visible': True, 'default': ''},
        'multicast_ip_range': {'allow_post': True, 'allow_put': True,
                               'is_visible': True,
                               'default': attributes.ATTR_NOT_SPECIFIED},
        # internal cursor into the multicast range; never exposed via API
        'multicast_ip_index': {'allow_post': False, 'allow_put': False,
                               'is_visible': False, 'default': '0'},
        'physical_network': {'allow_post': True, 'allow_put': False,
                             'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'is_visible': False, 'default': ''},
        # write-only lists used to grant/revoke tenant access on update
        'add_tenants': {'allow_post': True, 'allow_put': True,
                        'is_visible': True, 'default': None,
                        'convert_to': attributes.convert_none_to_empty_list},
        'remove_tenants': {
            'allow_post': True, 'allow_put': True,
            'is_visible': True, 'default': None,
            'convert_to': attributes.convert_none_to_empty_list,
        },
    },
    # read-only association between a profile and a tenant
    'network_profile_bindings': {
        'profile_id': {'allow_post': False, 'allow_put': False,
                       'validate': {'type:regex': attributes.UUID_PATTERN},
                       'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'is_visible': True},
    },
}
class Network_profile(extensions.ExtensionDescriptor):
    """API extension descriptor exposing Cisco N1kv network profiles."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Cisco N1kv Network Profiles"

    @classmethod
    def get_alias(cls):
        """Short alias used to reference the extension in API requests."""
        return 'network_profile'

    @classmethod
    def get_description(cls):
        """One-line description of what the extension provides."""
        return ("Profile includes the type of profile for N1kv")

    @classmethod
    def get_namespace(cls):
        """XML namespace identifying this extension."""
        return "http://docs.openstack.org/ext/n1kv/network-profile/api/v2.0"

    @classmethod
    def get_updated(cls):
        """Timestamp of the last update to the extension definition."""
        return "2012-07-20T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extended Resources."""
        plugin = manager.NeutronManager.get_plugin()
        exts = []
        for resource_name in ('network_profile', 'network_profile_binding'):
            collection_name = resource_name + "s"
            controller = base.create_resource(
                collection_name,
                resource_name,
                plugin,
                RESOURCE_ATTRIBUTE_MAP.get(collection_name))
            exts.append(
                extensions.ResourceExtension(collection_name, controller))
        return exts
| 0 |
import sys
from io import BytesIO
from os.path import isfile
from pathlib import Path
from platform import mac_ver
from subprocess import call
from AppKit import NSBitmapImageRep
from AppKit import NSData
from AppKit import NSImage
from AppKit import NSMakeSize
from Foundation import NSUserNotification
from Foundation import NSUserNotificationCenter
from mutagen import File
from PIL import Image
from cmus_osx.constants import CMUS_OSX_FOLDER_NAME
from cmus_osx.constants import CONFIG_NAME
from cmus_osx.constants import ENV
from cmus_osx.constants import ENV_VAR_PREFIX
from cmus_osx.env import build_env
from cmus_osx.util import locate_cmus_base_path
from cmus_osx.util import source_env_file
def exception_hook(exc_type, exc_value, exc_traceback):
    """Surface uncaught exceptions on the cmus status line via cmus-remote."""
    report = "echo cmus-osx error: %s" % str(exc_value)
    call(["cmus-remote", "--raw", report])


# Install the hook so any crash in this script is shown inside cmus
# instead of being lost (the script runs detached from a terminal).
sys.excepthook = exception_hook
# Load the user's cmus-osx config (if any) into the environment, then build
# the effective settings object from prefixed environment variables.
cmus_base_path = locate_cmus_base_path()
if cmus_base_path is not None:
    source_env_file(cmus_base_path / CMUS_OSX_FOLDER_NAME / CONFIG_NAME)
# Use defaults values if config file can't be located
env = build_env(ENV_VAR_PREFIX, ENV)
# cmus invokes this script with alternating key/value arguments
# (e.g. status playing file /path artist X ...); fold them into a dict.
status_raw = sys.argv[1:]
status = dict(zip(status_raw[0::2], status_raw[1::2]))
# Quickly exit if paused
if "status" in status:
    if not env.notification_on_pause:
        if status["status"] != "playing":
            exit(0)
# Raw bytes of the embedded cover art, if any format below provides one.
cover = None
if "url" in status:
    status["status"] = "(streaming ...)"
    # the title may contain both the artist and the song name
    if "title" in status:
        title_pair = status["title"].split(" - ")
        if len(title_pair) > 1:
            status["artist"] = title_pair[0]
            status["title"] = title_pair[1]
    else:
        status["title"] = status["url"]
elif "file" in status and isfile(status["file"]):
    # Probe the local file with mutagen; returns None for unknown formats.
    file = File(status["file"])
    if file is not None:
        # id3
        if "APIC:" in file:
            cover = file["APIC:"]
            cover = cover.data
        # mp4
        elif "covr" in file:
            covers = file["covr"]
            if len(covers) > 0:
                cover = covers[0]
        # flac
        elif hasattr(file, "pictures") and len(file.pictures) > 0:
            cover = file.pictures[0].data
# Build the notification title/subtitle/body. Every field in the status
# dict is optional, so each fragment is appended only when present.
if env.notification_on_pause:
    # Bug fix: use .get() -- previously this raised KeyError when cmus
    # supplied no "status" field.
    title = "cmus %s" % status.get("status", "")
else:
    title = "cmus"
subtitle = ""
message = ""
if "tracknumber" in status and status["tracknumber"].isnumeric():
    subtitle += "%s. " % status["tracknumber"]
if "title" in status:
    subtitle += status["title"]
if "artist" in status:
    message += status["artist"]
elif "albumartist" in status:
    message += status["albumartist"]
if "album" in status:
    message += " – %s" % status["album"]
if "date" in status and status["date"].isnumeric():
    message += " (%s)" % status["date"]
# If metadata is incomplete, fall back to the filename. Bug fix: only when
# a local file is actually present -- streaming URLs have no "file" key and
# previously raised KeyError here.
if (not subtitle or not message) and "file" in status:
    subtitle = Path(status["file"]).name
# Assemble and deliver the macOS user notification.
center = NSUserNotificationCenter.defaultUserNotificationCenter()
notification = NSUserNotification.alloc().init()
notification.setTitle_(title)
notification.setSubtitle_(subtitle)
notification.setInformativeText_(message)
# To-Do: Data allocation currently doesn't work in Catalina
if mac_ver()[0] != "10.15":
    if cover is not None:  # the song has an embedded cover image
        data = NSData.alloc().initWithBytes_length_(cover, len(cover))
        image_rep = NSBitmapImageRep.alloc().initWithData_(data)
        # CGImageGetWidth started returning bogus values in macOS 10.14 ->
        # Use Pillow to extract the image dimensions
        size = NSMakeSize(*Image.open(BytesIO(cover)).size)
        image = NSImage.alloc().initWithSize_(size)
        image.addRepresentation_(image_rep)
        if env.itunes_style_notification:
            # "_identityImage" is a private NSUserNotification key that
            # replaces the app icon with the cover art (iTunes style).
            notification.setValue_forKey_(image, "_identityImage")
        else:
            notification.setValue_forKey_(
                NSImage.alloc().initByReferencingFile_(str(env.app_icon)),
                "_identityImage",
            )
            notification.setContentImage_(image)
    else:  # song has no cover image, show an icon
        notification.setValue_forKey_(
            NSImage.alloc().initByReferencingFile_(str(env.app_icon)), "_identityImage"
        )
# Replace any previous cmus notification rather than stacking them.
center.removeAllDeliveredNotifications()
center.deliverNotification_(notification)
| 0.00023 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import re
from yolo.net.net import Net
class YoloNet(Net):
  def __init__(self, common_params, net_params, test=False):
    """Configure the YOLO network from parameter dicts.

    Args:
      common_params: dict providing 'image_size', 'num_classes', 'batch_size'.
      net_params: dict providing 'cell_size', 'boxes_per_cell',
        'weight_decay' and, when training, the four loss-scale entries.
      test: when True, skip reading the loss-scale parameters
        (they are only needed to build the training loss).
    """
    super(YoloNet, self).__init__(common_params, net_params)
    # process params (all values arrive as strings from the config parser)
    self.image_size = int(common_params['image_size'])
    self.num_classes = int(common_params['num_classes'])
    self.cell_size = int(net_params['cell_size'])
    self.boxes_per_cell = int(net_params['boxes_per_cell'])
    self.batch_size = int(common_params['batch_size'])
    self.weight_decay = float(net_params['weight_decay'])

    if not test:
      # Relative weights of the four loss terms (see loss()).
      self.object_scale = float(net_params['object_scale'])
      self.noobject_scale = float(net_params['noobject_scale'])
      self.class_scale = float(net_params['class_scale'])
      self.coord_scale = float(net_params['coord_scale'])
  def inference(self, images):
    """Build the yolo model

    Args:
      images:  4-D tensor [batch_size, image_height, image_width, channels]
    Returns:
      predicts: 4-D tensor [batch_size, cell_size, cell_size, num_classes + 5 * boxes_per_cell]
    """
    # Layer layout follows the original YOLOv1 paper: 24 conv layers
    # followed by two fully connected layers.
    conv_num = 1

    temp_conv = self.conv2d('conv' + str(conv_num), images, [7, 7, 3, 64], stride=2)
    conv_num += 1

    temp_pool = self.max_pool(temp_conv, [2, 2], 2)

    temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [3, 3, 64, 192], stride=1)
    conv_num += 1

    temp_pool = self.max_pool(temp_conv, [2, 2], 2)

    temp_conv = self.conv2d('conv' + str(conv_num), temp_pool, [1, 1, 192, 128], stride=1)
    conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 128, 256], stride=1)
    conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [1, 1, 256, 256], stride=1)
    conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 256, 512], stride=1)
    conv_num += 1

    temp_conv = self.max_pool(temp_conv, [2, 2], 2)

    # Four repeated 1x1 bottleneck + 3x3 expansion pairs at 512 channels.
    for i in range(4):
      temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [1, 1, 512, 256], stride=1)
      conv_num += 1
      temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 256, 512], stride=1)
      conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [1, 1, 512, 512], stride=1)
    conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 512, 1024], stride=1)
    conv_num += 1

    temp_conv = self.max_pool(temp_conv, [2, 2], 2)

    # Two repeated bottleneck pairs at 1024 channels.
    for i in range(2):
      temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [1, 1, 1024, 512], stride=1)
      conv_num += 1
      temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 512, 1024], stride=1)
      conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
    conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=2)
    conv_num += 1

    #
    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
    conv_num += 1

    temp_conv = self.conv2d('conv' + str(conv_num), temp_conv, [3, 3, 1024, 1024], stride=1)
    conv_num += 1

    # Fully connected head: 7*7*1024 -> 4096 -> per-cell predictions.
    local1 = self.local('local1', temp_conv, 49 * 1024, 4096)

    # NOTE(review): dropout is applied unconditionally, i.e. also at
    # inference time -- confirm whether a keep_prob of 1.0 is wanted there.
    local1 = tf.nn.dropout(local1, keep_prob=0.5)

    local2 = self.local('local2', local1, 4096, self.cell_size * self.cell_size * ( self.num_classes + 5 * self.boxes_per_cell), leaky=False)

    local2 = tf.reshape(local2, [tf.shape(local2)[0], self.cell_size, self.cell_size, self.num_classes + 5 * self.boxes_per_cell])

    predicts = local2

    return predicts
  def iou(self, boxes1, boxes2):
    """calculate ious

    Args:
      boxes1: 4-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4]  ====> (x_center, y_center, w, h)
      boxes2: 1-D tensor [4] ===> (x_center, y_center, w, h)
    Return:
      iou: 3-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    """
    # Convert both inputs from (cx, cy, w, h) to corner form
    # (x1, y1, x2, y2). NOTE: tf.pack is the TF <= 0.11 name of tf.stack.
    boxes1 = tf.pack([boxes1[:, :, :, 0] - boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] - boxes1[:, :, :, 3] / 2,
                      boxes1[:, :, :, 0] + boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] + boxes1[:, :, :, 3] / 2])
    boxes1 = tf.transpose(boxes1, [1, 2, 3, 0])
    boxes2 =  tf.pack([boxes2[0] - boxes2[2] / 2, boxes2[1] - boxes2[3] / 2,
                       boxes2[0] + boxes2[2] / 2, boxes2[1] + boxes2[3] / 2])

    #calculate the left up point
    lu = tf.maximum(boxes1[:, :, :, 0:2], boxes2[0:2])
    rd = tf.minimum(boxes1[:, :, :, 2:], boxes2[2:])

    #intersection
    intersection = rd - lu

    inter_square = intersection[:, :, :, 0] * intersection[:, :, :, 1]

    # Zero out boxes that do not overlap (negative extents).
    mask = tf.cast(intersection[:, :, :, 0] > 0, tf.float32) * tf.cast(intersection[:, :, :, 1] > 0, tf.float32)

    inter_square = mask * inter_square

    #calculate the boxs1 square and boxs2 square
    square1 = (boxes1[:, :, :, 2] - boxes1[:, :, :, 0]) * (boxes1[:, :, :, 3] - boxes1[:, :, :, 1])
    square2 = (boxes2[2] - boxes2[0]) * (boxes2[3] - boxes2[1])

    # epsilon avoids division by zero for degenerate boxes
    return inter_square/(square1 + square2 - inter_square + 1e-6)
  def cond1(self, num, object_num, loss, predict, label, nilboy):
    """tf.while_loop condition: keep iterating while num < object_num.

    Only `num` and `object_num` are read; the remaining loop variables are
    part of the signature because tf.while_loop passes all of them.
    """
    return num < object_num
  def body1(self, num, object_num, loss, predict, labels, nilboy):
    """tf.while_loop body: add the loss contribution of ground-truth object `num`.

    Args:
      predict: 3-D tensor [cell_size, cell_size, num_classes + 5 * boxes_per_cell]
      labels : [max_objects, 5]  (x_center, y_center, w, h, class)
      loss   : running [class, object, noobject, coord] loss accumulators
    Returns:
      The loop variables with num advanced by 1 and loss updated.
    """
    label = labels[num:num+1, :]
    label = tf.reshape(label, [-1])

    #calculate objects  tensor [CELL_SIZE, CELL_SIZE]
    # Grid cells covered by the box (coordinates in cell units).
    min_x = (label[0] - label[2] / 2) / (self.image_size / self.cell_size)
    max_x = (label[0] + label[2] / 2) / (self.image_size / self.cell_size)
    min_y = (label[1] - label[3] / 2) / (self.image_size / self.cell_size)
    max_y = (label[1] + label[3] / 2) / (self.image_size / self.cell_size)

    min_x = tf.floor(min_x)
    min_y = tf.floor(min_y)

    max_x = tf.ceil(max_x)
    max_y = tf.ceil(max_y)

    # Ones over the covered cells, zero-padded out to the full grid.
    temp = tf.cast(tf.stack([max_y - min_y, max_x - min_x]), dtype=tf.int32)
    objects = tf.ones(temp, tf.float32)

    temp = tf.cast(tf.stack([min_y, self.cell_size - max_y, min_x, self.cell_size - max_x]), tf.int32)
    temp = tf.reshape(temp, (2, 2))
    objects = tf.pad(objects, temp, "CONSTANT")

    #calculate objects  tensor [CELL_SIZE, CELL_SIZE]
    #calculate responsible tensor [CELL_SIZE, CELL_SIZE]
    # One-hot mask marking the single cell containing the box centre.
    center_x = label[0] / (self.image_size / self.cell_size)
    center_x = tf.floor(center_x)

    center_y = label[1] / (self.image_size / self.cell_size)
    center_y = tf.floor(center_y)

    response = tf.ones([1, 1], tf.float32)

    temp = tf.cast(tf.stack([center_y, self.cell_size - center_y - 1, center_x, self.cell_size -center_x - 1]), tf.int32)
    temp = tf.reshape(temp, (2, 2))
    response = tf.pad(response, temp, "CONSTANT")
    #objects = response

    #calculate iou_predict_truth [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    predict_boxes = predict[:, :, self.num_classes + self.boxes_per_cell:]

    predict_boxes = tf.reshape(predict_boxes, [self.cell_size, self.cell_size, self.boxes_per_cell, 4])

    # Predictions are cell-relative offsets / image-relative sizes;
    # convert to absolute pixel coordinates.
    predict_boxes = predict_boxes * [self.image_size / self.cell_size, self.image_size / self.cell_size, self.image_size, self.image_size]

    base_boxes = np.zeros([self.cell_size, self.cell_size, 4])

    for y in range(self.cell_size):
      for x in range(self.cell_size):
        #nilboy
        base_boxes[y, x, :] = [self.image_size / self.cell_size * x, self.image_size / self.cell_size * y, 0, 0]
    base_boxes = np.tile(np.resize(base_boxes, [self.cell_size, self.cell_size, 1, 4]), [1, 1, self.boxes_per_cell, 1])

    predict_boxes = base_boxes + predict_boxes

    iou_predict_truth = self.iou(predict_boxes, label[0:4])
    #calculate C [cell_size, cell_size, boxes_per_cell]
    C = iou_predict_truth * tf.reshape(response, [self.cell_size, self.cell_size, 1])

    #calculate I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    # I selects, within the responsible cell, the box with the best IOU.
    I = iou_predict_truth * tf.reshape(response, (self.cell_size, self.cell_size, 1))

    max_I = tf.reduce_max(I, 2, keep_dims=True)

    I = tf.cast((I >= max_I), tf.float32) * tf.reshape(response, (self.cell_size, self.cell_size, 1))

    #calculate no_I tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    no_I = tf.ones_like(I, dtype=tf.float32) - I


    p_C = predict[:, :, self.num_classes:self.num_classes + self.boxes_per_cell]

    #calculate truth x,y,sqrt_w,sqrt_h 0-D
    x = label[0]
    y = label[1]

    sqrt_w = tf.sqrt(tf.abs(label[2]))
    sqrt_h = tf.sqrt(tf.abs(label[3]))
    #sqrt_w = tf.abs(label[2])
    #sqrt_h = tf.abs(label[3])

    #calculate predict p_x, p_y, p_sqrt_w, p_sqrt_h 3-D [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    p_x = predict_boxes[:, :, :, 0]
    p_y = predict_boxes[:, :, :, 1]

    #p_sqrt_w = tf.sqrt(tf.abs(predict_boxes[:, :, :, 2])) * ((tf.cast(predict_boxes[:, :, :, 2] > 0, tf.float32) * 2) - 1)
    #p_sqrt_h = tf.sqrt(tf.abs(predict_boxes[:, :, :, 3])) * ((tf.cast(predict_boxes[:, :, :, 3] > 0, tf.float32) * 2) - 1)
    #p_sqrt_w = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 2]))
    #p_sqrt_h = tf.sqrt(tf.maximum(0.0, predict_boxes[:, :, :, 3]))
    #p_sqrt_w = predict_boxes[:, :, :, 2]
    #p_sqrt_h = predict_boxes[:, :, :, 3]
    # Clamp predicted sizes to [0, image_size] before the sqrt (paper trick
    # to de-emphasize size errors on large boxes).
    p_sqrt_w = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 2])))
    p_sqrt_h = tf.sqrt(tf.minimum(self.image_size * 1.0, tf.maximum(0.0, predict_boxes[:, :, :, 3])))
    #calculate truth p 1-D tensor [NUM_CLASSES]
    P = tf.one_hot(tf.cast(label[4], tf.int32), self.num_classes, dtype=tf.float32)

    #calculate predict p_P 3-D tensor [CELL_SIZE, CELL_SIZE, NUM_CLASSES]
    p_P = predict[:, :, 0:self.num_classes]

    #class_loss
    class_loss = tf.nn.l2_loss(tf.reshape(objects, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale
    #class_loss = tf.nn.l2_loss(tf.reshape(response, (self.cell_size, self.cell_size, 1)) * (p_P - P)) * self.class_scale

    #object_loss
    object_loss = tf.nn.l2_loss(I * (p_C - C)) * self.object_scale
    #object_loss = tf.nn.l2_loss(I * (p_C - (C + 1.0)/2.0)) * self.object_scale

    #noobject_loss
    #noobject_loss = tf.nn.l2_loss(no_I * (p_C - C)) * self.noobject_scale
    noobject_loss = tf.nn.l2_loss(no_I * (p_C)) * self.noobject_scale

    #coord_loss
    coord_loss = (tf.nn.l2_loss(I * (p_x - x)/(self.image_size/self.cell_size)) +
                  tf.nn.l2_loss(I * (p_y - y)/(self.image_size/self.cell_size)) +
                  tf.nn.l2_loss(I * (p_sqrt_w - sqrt_w))/ self.image_size +
                  tf.nn.l2_loss(I * (p_sqrt_h - sqrt_h))/self.image_size) * self.coord_scale

    nilboy = I

    return num + 1, object_num, [loss[0] + class_loss, loss[1] + object_loss, loss[2] + noobject_loss, loss[3] + coord_loss], predict, labels, nilboy
  def loss(self, predicts, labels, objects_num):
    """Add Loss to all the trainable variables

    Args:
      predicts: 4-D tensor [batch_size, cell_size, cell_size, num_classes + 5 * boxes_per_cell]
      labels  : 3-D tensor of [batch_size, max_objects, 5]
      objects_num: 1-D tensor [batch_size] -- number of valid objects per image
    Returns:
      (total_loss, nilboy) where total_loss sums the four YOLO loss terms
      (class, object, no-object, coordinate) plus any weight-decay losses
      already registered in the 'losses' collection.
    """
    class_loss = tf.constant(0, tf.float32)
    object_loss = tf.constant(0, tf.float32)
    noobject_loss = tf.constant(0, tf.float32)
    coord_loss = tf.constant(0, tf.float32)
    loss = [0, 0, 0, 0]
    for i in range(self.batch_size):
      predict = predicts[i, :, :, :]
      label = labels[i, :, :]
      object_num = objects_num[i]
      nilboy = tf.ones([7,7,2])
      # Iterate over this image's objects, accumulating the four loss terms.
      tuple_results = tf.while_loop(self.cond1, self.body1, [tf.constant(0), object_num, [class_loss, object_loss, noobject_loss, coord_loss], predict, label, nilboy])
      for j in range(4):
        loss[j] = loss[j] + tuple_results[2][j]
      nilboy = tuple_results[5]

    tf.add_to_collection('losses', (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size)

    # NOTE: tf.scalar_summary is the TF <= 0.12 API (tf.summary.scalar later).
    tf.scalar_summary('class_loss', loss[0]/self.batch_size)
    tf.scalar_summary('object_loss', loss[1]/self.batch_size)
    tf.scalar_summary('noobject_loss', loss[2]/self.batch_size)
    tf.scalar_summary('coord_loss', loss[3]/self.batch_size)
    tf.scalar_summary('weight_loss', tf.add_n(tf.get_collection('losses')) - (loss[0] + loss[1] + loss[2] + loss[3])/self.batch_size )

    return tf.add_n(tf.get_collection('losses'), name='total_loss'), nilboy
import numpy as np
from matplotlib import pyplot
import spm1d
# Demo: three valid ways to construct a paired-sample confidence interval
# with spm1d, contrasted with the incorrect approach of computing separate
# one-sample CIs per group.

#(0) Load dataset:
dataset = spm1d.data.uv1d.t2.PlantarArchAngle()
yA,yB = dataset.get_data()  #normal and fast walking


#(1) Compute confidence intervals:
alpha = 0.05
# datum/mu choose what the CI is drawn around and what it is tested against.
ci0 = spm1d.stats.ci_pairedsample(yA, yB, alpha, datum='difference', mu=0)
ci1 = spm1d.stats.ci_pairedsample(yA, yB, alpha, datum='meanA', mu='meanB')
ci2 = spm1d.stats.ci_pairedsample(yA, yB, alpha, datum='meanA', mu='tailB')
print( ci0 )
print( ci1 )
print( ci2 )
### compute incorrect CIs for demonstration:
ciA_bad = spm1d.stats.ci_onesample(yA, alpha)
ciB_bad = spm1d.stats.ci_onesample(yB, alpha)



#(2) Plot:
pyplot.close('all')
pyplot.figure(figsize=(14,7))
### plot means and standard deviations:
ax = pyplot.subplot(231)
spm1d.plot.plot_mean_sd(yA)
spm1d.plot.plot_mean_sd(yB, linecolor='r', facecolor='r', edgecolor='r')
spm1d.plot.legend_manual(ax, labels=['Group A', 'Group B', 'Mean', 'SD'], colors=['0.3', 'r', 'k','0.85'], linestyles=['-']*4, linewidths=[10, 10, 3, 10], loc='lower left', fontsize=10)
ax.set_title('Means and SDs')
### plot hypothesis test results:
ax = pyplot.subplot(232)
spmi = spm1d.stats.ttest_paired(yA, yB).inference(alpha, two_tailed=True)
spmi.plot(ax=ax)
spmi.plot_threshold_label()
ax.set_title('Hypothesis test')
ax.text(0.6, 0.2, 'Datum: zero\nCriterion: $t ^*$', transform=ax.transAxes)
### plot confidence interval for mean paired difference:
ax = pyplot.subplot(233)
ci0.plot(ax=ax)
ax.set_title('CI (possibility 1)')
ax.text(0.1, 0.8, 'Datum: difference\nCriterion: mu=0', transform=ax.transAxes)
### plot confidence interval for mean paired difference:
ax = pyplot.subplot(234)
ci1.plot(ax=ax)
ax.set_title('CI (possibility 2)')
ax.text(0.1, 0.4, 'Datum: meanA\nCriterion: meanB', transform=ax.transAxes)
### plot confidence interval for mean paired difference:
ax = pyplot.subplot(235)
ci2.plot(ax=ax)
ax.set_title('CI (possibility 3)')
ax.text(0.1, 0.4, 'Datum: meanA\nCriterion: tailsAB', transform=ax.transAxes)
### plot CIs computed separately for the means (INCORRECT!!!)
ax = pyplot.subplot(236)
ciA_bad.plot(ax=ax)
ciB_bad.plot(ax=ax, linecolor='r', facecolor='r', edgecolor='r', alpha=0.3)
ax.set_title('CIs computed separately for each group', size=10)
ax.text(0.1, 0.4, 'INCORRECT!!!', transform=ax.transAxes)
pyplot.show()
| 0.019786 |
import logging
from Crypto.Cipher import AES
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
BS = 16  # AES block size in bytes


def pad(s):
    """PKCS#7-pad *s* up to a multiple of BS characters.

    A full block of padding is appended when len(s) is already a multiple
    of BS, so unpad() can always recover the original string.
    (Converted from a lambda assignment -- PEP 8 E731.)
    """
    n = BS - len(s) % BS
    return s + n * chr(n)


def unpad(s):
    """Strip PKCS#7 padding: the last character encodes the pad length."""
    return s[0:-ord(s[-1])]


# Base64-encoded communication key received from the server (decoded below).
commKey = "J9WfqhH13Itsl0FoWcHxUKHyjYSwEsdG50o+pPp/jt8="
class Ugly(object):
    """Toy AES-CBC key-exchange helper.

    Encrypts/decrypts with a fixed server key. When no IV is supplied, the
    first 16 bytes of the (padded) plaintext / ciphertext are used as the IV.

    NOTE(review): this scheme (static key, plaintext-derived IV, unchecked
    padding) is for protocol experimentation only -- not secure for real use.
    """

    def __init__(self, serverkey, iv=None, AESMode=AES.MODE_CBC):
        self.serverkey = serverkey
        self.AESMode = AESMode  # stored but unused: encrypt/decrypt hard-code CBC
        self.iv = iv
        # Bug fix: initialise so get_client_key() does not raise
        # AttributeError before set_communication_key() is called.
        self.communicationKey = None

    def get_encrypted_server_key(self):
        """Return the server key encrypted under itself."""
        return self.encrypt_natural(self.serverkey)

    def encrypt_natural(self, raw):
        """Encrypt *raw* with the server key."""
        return self.encrypt(self.serverkey, raw)

    def decrypt_natural(self, enc):
        """Decrypt *enc* with the server key."""
        return self.decrypt(self.serverkey, enc)

    def encrypt(self, key, raw):
        """AES-CBC encrypt the PKCS#7-padded *raw* under *key*."""
        raw = pad(raw)
        if self.iv is None:
            # No fixed IV: consume the first block of the padded plaintext.
            iv = raw[:16]
            raw = raw[16:]
        else:
            iv = self.iv
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return cipher.encrypt(raw)

    def decrypt(self, key, enc):
        """AES-CBC decrypt *enc* under *key* and strip the padding."""
        if self.iv is None:
            iv = enc[:16]
            enc = enc[16:]
        else:
            iv = self.iv
        # Bug fix: previously passed self.iv here, which is None exactly on
        # the branch that sliced the IV out of the ciphertext.
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return unpad(cipher.decrypt(enc))

    def set_communication_key(self, ck):
        """Store the (encrypted) communication key and return it."""
        self.communicationKey = ck
        return self.communicationKey

    def get_client_key(self):
        """Decrypt and cache the client key from the communication key."""
        if self.communicationKey is None:
            # Bug fix: 'Error' was undefined (NameError at raise time);
            # raise a standard exception instead.
            raise ValueError("No Communication key")
        self.clientKey = self.decrypt_natural(self.communicationKey)
        return self.clientKey

    def get_encrypted_password(self, password):
        """Encrypt *password* under the derived client key."""
        return self.encrypt(self.get_client_key(), password)
# Demo / smoke test of the key-exchange helper.
# Server key is 16 chars (AES-128); the IV string is padded to 16 bytes.
x = Ugly("53cr3t0p4ssw0rd1","MSID-Security\x00\x00\x00")
# NOTE(review): mid-file imports (PEP 8 wants them at the top); binascii
# appears unused here.
import base64
import binascii
commKey = base64.b64decode(commKey)
log.info(commKey.__class__)
log.debug("AES block size: %s"%AES.block_size)
log.debug(x)
# NOTE(review): the str + bytes concatenations below (b64encode returns
# bytes, commKey is bytes) only work under Python 2 -- they raise TypeError
# on Python 3. Verify the intended interpreter version.
log.debug("Encripted server key: "+base64.b64encode(x.get_encrypted_server_key()))
log.debug("Communication-key: "+x.set_communication_key(commKey))
log.debug("Client Key: "+base64.b64encode(x.get_client_key()))
encPass = x.get_encrypted_password("somePassword")
log.debug("EncPassword: %s"%encPass)
log.debug("Encrypted Passowrd: "+base64.b64encode(encPass))
| 0.017437 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: widen Track.audio_sampling_rate from a positive integer
        to a nullable 20-char string so non-numeric rates can be stored."""

        # Changing field 'Track.audio_sampling_rate'
        db.alter_column(u'flicksapp_track', 'audio_sampling_rate', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
def backwards(self, orm):
# Changing field 'Track.audio_sampling_rate'
db.alter_column(u'flicksapp_track', 'audio_sampling_rate', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
models = {
u'flicksapp.country': {
'Meta': {'object_name': 'Country'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'flicksapp.file': {
'Meta': {'object_name': 'File'},
'container_format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'file_output': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_size': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'file_type': ('django.db.models.fields.CharField', [], {'default': "'V'", 'max_length': '1'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'overall_bit_rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'tracks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'file'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Track']"}),
'writing_application': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'writing_library': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'flicksapp.genre': {
'Meta': {'object_name': 'Genre'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'flicksapp.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'flicksapp.language': {
'Meta': {'object_name': 'Language'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'flicksapp.movie': {
'Meta': {'object_name': 'Movie'},
'added_on': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'akas': ('flicksapp.fields.ListField', [], {}),
'cast': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'acted_in'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Person']"}),
'countries': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'movies'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Country']"}),
'directors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'directed'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Person']"}),
'favourite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'files': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'movie'", 'symmetrical': 'False', 'to': u"orm['flicksapp.File']"}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'movies'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Genre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imdb_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'imdb_sync_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'karagarga_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'movies'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Keyword']"}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'movies'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Language']"}),
'mpaa': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'on_karagarga': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plot': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'plot_outline': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'producers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'produced'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Person']"}),
'rating': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
'runtime': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'seen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'trumpable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'votes': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'writers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'written'", 'symmetrical': 'False', 'to': u"orm['flicksapp.Person']"}),
'year': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
u'flicksapp.person': {
'Meta': {'object_name': 'Person'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imdb_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'flicksapp.track': {
'Meta': {'object_name': 'Track'},
'audio_bit_rate_mode': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'audio_channels': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'audio_sampling_rate': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'bit_rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'codec': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'stream_size': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'track_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'track_type': ('django.db.models.fields.CharField', [], {'default': "'V'", 'max_length': '1'}),
'video_aspect_ratio': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'video_bpp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'video_frame_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'video_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'video_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'writing_library': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['flicksapp'] | 0.007539 |
# -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
try:
import pygraphviz as pgv
except ImportError:
pgv = None
# True when running under Google App Engine (web2py sets this env flag).
is_gae = request.env.web2py_runtime_gae or False
# ## critical --- make a copy of the environment
global_env = copy.copy(globals())
global_env['datetime'] = datetime
http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
    hosts = (http_host, socket.gethostname(),
             socket.gethostbyname(http_host),
             '::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
    hosts = (http_host, )
# Security gate: only allow appadmin over HTTPS or from localhost, except
# for the 'manage' action which has its own role-based check below.
if request.env.http_x_forwarded_for or request.is_https:
    session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1") and \
        (request.function != 'manage'):
    raise HTTP(200, T('appadmin is disabled because insecure channel'))
if request.function == 'manage':
    # 'manage' is guarded by Auth group membership instead of localhost.
    if not 'auth' in globals() or not request.args:
        redirect(URL(request.controller, 'index'))
    manager_action = auth.settings.manager_actions.get(request.args(0), None)
    if manager_action is None and request.args(0) == 'auth':
        manager_action = dict(role=auth.settings.auth_manager_role,
                              heading=T('Manage Access Control'),
                              tables=[auth.table_user(),
                                      auth.table_group(),
                                      auth.table_permission()])
    manager_role = manager_action.get('role', None) if manager_action else None
    auth.requires_membership(manager_role)(lambda: None)()
    menu = False
elif (request.application == 'admin' and not session.authorized) or \
        (request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
    # Not authenticated against the admin app: bounce to the admin login.
    redirect(URL('admin', 'default', 'index',
                 vars=dict(send=URL(args=request.args, vars=request.vars))))
else:
    response.subtitle = T('Database Administration (appadmin)')
    menu = True
ignore_rw = True
response.view = 'appadmin.html'
if menu:
    response.menu = [[T('design'), False, URL('admin', 'default', 'design',
                     args=[request.application])], [T('db'), False,
                     URL('index')], [T('state'), False,
                     URL('state')], [T('cache'), False,
                     URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
# Dead code (guarded by `False and`): kept from upstream web2py appadmin.
if False and request.tickets_db:
    from gluon.restricted import TicketStorage
    ts = TicketStorage()
    ts._get_table(request.tickets_db, ts.tablename, request.application)
def get_databases(request):
    """Collect every DAL database object visible in the copied global env.

    The *request* argument is unused; kept for signature compatibility.
    """
    found = {}
    for name, candidate in global_env.items():
        try:
            is_db = isinstance(candidate, GQLDB)
        except:
            # GQLDB is undefined outside GAE; fall back to the SQL DAL class.
            is_db = isinstance(candidate, SQLDB)
        if is_db:
            found[name] = candidate
    return found
# Discover all DAL instances once at import time (the argument is unused).
databases = get_databases(None)
def eval_in_global_env(text):
    """Evaluate *text* as a Python expression against the appadmin globals.

    The expression result is bound to ``_ret`` inside ``global_env`` and
    then returned.
    """
    statement = '_ret=%s' % text
    exec(statement, {}, global_env)
    return global_env['_ret']
def get_database(request):
    """Return the DAL object named by the first URL argument, or redirect."""
    args = request.args
    if args and args[0] in databases:
        return eval_in_global_env(args[0])
    session.flash = T('invalid request')
    redirect(URL('index'))
def get_table(request):
    """Return ``(db, tablename)`` from the URL arguments, or redirect."""
    db = get_database(request)
    args = request.args
    if len(args) > 1 and args[1] in db.tables:
        return (db, args[1])
    session.flash = T('invalid request')
    redirect(URL('index'))
def get_query(request):
    """Parse ``request.vars.query`` into a DAL query; None when it fails."""
    try:
        parsed = eval_in_global_env(request.vars.query)
    except Exception:
        parsed = None
    return parsed
def query_by_table_type(tablename, db, request=request):
    """Build a select-all query string suited to the table's key type.

    Keyed tables (legacy ``_primarykey``) compare their first key column
    against an always-true condition; id-based tables use ``id>0``.
    """
    table = db[tablename]
    if not hasattr(table, '_primarykey'):
        return '%s.%s.id>0' % tuple(request.args[:2])
    firstkey = table[table._primarykey[0]]
    # String-typed keys cannot be compared with >0; use a non-empty check.
    cond = '!=""' if firstkey.type in ['string', 'text'] else '>0'
    return '%s.%s.%s%s' % (
        request.args[0], request.args[1], firstkey.name, cond)
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
    """Landing page: expose every discovered database to the view."""
    return {'databases': databases}
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
    """Render and process an insert form for the selected table."""
    db, table = get_table(request)
    form = SQLFORM(db[table], ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form, table=db[table])
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
    """Stream an uploaded file belonging to the selected database.

    Delegates the actual streaming (content type, range handling) to
    web2py's ``response.download``.
    """
    # Removed an unused function-local `import os` left over from upstream.
    db = get_database(request)
    return response.download(request, db)
def csv():
    """Serve the rows matching the current query as a CSV attachment."""
    import gluon.contenttype
    headers = response.headers
    headers['Content-Type'] = gluon.contenttype.contenttype('.csv')
    database = get_database(request)
    query = get_query(request)
    if not query:
        return None
    # Name the download after the "<db>.<table>" prefix of the query string.
    headers['Content-disposition'] = 'attachment; filename=%s_%s.csv' \
        % tuple(request.vars.query.split('.')[:2])
    return str(database(query, ignore_common_filters=True).select())
def import_csv(table, file):
    """Bulk-load rows into *table* from the open CSV *file* object."""
    importer = table.import_from_csv_file
    importer(file)
def select():
    """Main browse view: run a query, optionally bulk update/delete the
    matching rows, paginate results, and handle CSV import.

    Returns the dict consumed by the appadmin view (form, rows, paging
    info, optional CSV-import form, and a traceback string on failure).
    """
    import re
    db = get_database(request)
    dbname = request.args[0]
    # IMAP DAL adapters get special, reduced treatment (few columns, tiny pages).
    try:
        is_imap = db._uri.startswith("imap://")
    except (KeyError, AttributeError, TypeError):
        is_imap = False
    # Rewrite shorthand "table.field=value" queries into full DAL syntax.
    regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                                                   match.group('table'), match.group('field'),
                                                   match.group('value'))
    else:
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    step = 100
    fields = []
    if is_imap:
        step = 3
    stop = start + step
    table = None
    rows = []
    # Clicking the same column header again toggles ascending/descending.
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        if orderby == session.last_orderby:
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
                _name='query', _value=request.vars.query or '',
                requires=IS_NOT_EMPTY(
                    error_message=T("Cannot be empty")))), TR(T('Update:'),
                INPUT(_name='update_check', _type='checkbox',
                      value=False), INPUT(_style='width:400px',
                                          _name='update_fields', _value=request.vars.update_fields
                                          or '')), TR(T('Delete:'), INPUT(_name='delete_check',
                                                                          _class='delete', _type='checkbox', value=False), ''),
                TR('', '', INPUT(_type='submit', _value=T('submit')))),
                _action=URL(r=request, args=request.args))
    tb = None
    if form.accepts(request.vars, formname=None):
        regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
        try:
            nrows = db(query, ignore_common_filters=True).count()
            if form.vars.update_check and form.vars.update_fields:
                db(query, ignore_common_filters=True).update(
                    **eval_in_global_env('dict(%s)' % form.vars.update_fields))
                response.flash = T('%s %%{row} updated', nrows)
            elif form.vars.delete_check:
                db(query, ignore_common_filters=True).delete()
                response.flash = T('%s %%{row} deleted', nrows)
                nrows = db(query, ignore_common_filters=True).count()
            if is_imap:
                fields = [db[table][name] for name in
                          ("id", "uid", "created", "to",
                           "sender", "subject")]
            if orderby:
                rows = db(query, ignore_common_filters=True).select(
                    *fields, limitby=(start, stop),
                    orderby=eval_in_global_env(orderby))
            else:
                rows = db(query, ignore_common_filters=True).select(
                    *fields, limitby=(start, stop))
        except Exception, e:
            import traceback
            tb = traceback.format_exc()
            (rows, nrows) = ([], 0)
            response.flash = DIV(T('Invalid Query'), PRE(str(e)))
    # begin handle upload csv
    csv_table = table or request.vars.table
    if csv_table:
        formcsv = FORM(str(T('or import from csv file')) + " ",
                       INPUT(_type='file', _name='csvfile'),
                       INPUT(_type='hidden', _value=csv_table, _name='table'),
                       INPUT(_type='submit', _value=T('import')))
    else:
        formcsv = None
    if formcsv and formcsv.process().accepted:
        try:
            import_csv(db[request.vars.table],
                       request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception, e:
            response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
    # end handle upload csv
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        step=step,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
        formcsv=formcsv,
        tb=tb
    )
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
    """Edit/delete a single record of the selected table.

    Supports both id-based tables and legacy keyed tables
    (``_primarykey``); key columns are made read-only on keyed tables.
    Redirects back to select() when the record is missing or after a
    successful save.
    """
    (db, table) = get_table(request)
    keyed = hasattr(db[table], '_primarykey')
    record = None
    # Disable any common filter so the record can be edited regardless of it.
    db[table]._common_filter = None
    if keyed:
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[
                0]]).select().first()
    else:
        record = db(db[table].id == request.args(
            2)).select().first()
    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    if keyed:
        # Primary-key columns must not be editable on keyed tables.
        for k in db[table]._primarykey:
            db[table][k].writable = False
    form = SQLFORM(
        db[table], record, deletable=True, delete_label=T('Check to delete'),
        ignore_rw=ignore_rw and not keyed,
        linkto=URL('select',
                   args=request.args[:1]), upload=URL(r=request,
                                                      f='download', args=request.args[:1]))
    if form.accepts(request.vars, session):
        session.flash = T('done!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
    """State page: all data comes from globals exposed to the view."""
    return {}
def ccache():
    """Cache-inspection page: show RAM/disk cache statistics and offer
    buttons to clear either cache (or both).

    On GAE only the memcache-backed RAM cache exists; elsewhere both the
    RAM cache and the shelve-backed disk cache are scanned. Byte/object
    sizes are only available when guppy (heapy) is installed.
    """
    if is_gae:
        form = FORM(
            P(TAG.BUTTON(T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")))
    else:
        cache.ram.initialize()
        cache.disk.initialize()
        form = FORM(
            P(TAG.BUTTON(
                T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
            P(TAG.BUTTON(
                T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
            P(TAG.BUTTON(
                T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
        )
    if form.accepts(request.vars, session):
        session.flash = ""
        if is_gae:
            if request.vars.yes:
                cache.ram.clear()
                session.flash += T("Cache Cleared")
        else:
            clear_ram = False
            clear_disk = False
            if request.vars.yes:
                clear_ram = clear_disk = True
            if request.vars.ram:
                clear_ram = True
            if request.vars.disk:
                clear_disk = True
            if clear_ram:
                cache.ram.clear()
                session.flash += T("Ram Cleared")
            if clear_disk:
                cache.disk.clear()
                session.flash += T("Disk Cleared")
        redirect(URL(r=request))
    # Optional dependency: guppy provides per-object memory statistics.
    try:
        from guppy import hpy
        hp = hpy()
    except ImportError:
        hp = False
    import shelve
    import os
    import copy
    import time
    import math
    from gluon import portalocker
    # Accumulators for the statistics tables rendered by the view.
    ram = {
        'entries': 0,
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time(),
        'keys': []
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    disk['keys'] = []
    total['keys'] = []
    def GetInHMS(seconds):
        # Convert a duration in seconds to an (hours, minutes, seconds) tuple.
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)
        return (hours, minutes, seconds)
    if is_gae:
        gae_stats = cache.ram.client.get_stats()
        try:
            gae_stats['ratio'] = ((gae_stats['hits'] * 100) /
                                  (gae_stats['hits'] + gae_stats['misses']))
        except ZeroDivisionError:
            gae_stats['ratio'] = T("?")
        gae_stats['oldest'] = GetInHMS(time.time() - gae_stats['oldest_item_age'])
        total.update(gae_stats)
    else:
        # RAM cache: dict entries hold hit counters, others are (time, value).
        for key, value in cache.ram.storage.iteritems():
            if isinstance(value, dict):
                ram['hits'] = value['hit_total'] - value['misses']
                ram['misses'] = value['misses']
                try:
                    ram['ratio'] = ram['hits'] * 100 / value['hit_total']
                except (KeyError, ZeroDivisionError):
                    ram['ratio'] = 0
            else:
                if hp:
                    ram['bytes'] += hp.iso(value[1]).size
                    ram['objects'] += hp.iso(value[1]).count
                ram['entries'] += 1
                if value[0] < ram['oldest']:
                    ram['oldest'] = value[0]
                ram['keys'].append((key, GetInHMS(time.time() - value[0])))
        # Disk cache: guard the shelve file with the cache lock while scanning.
        folder = os.path.join(request.folder,'cache')
        if not os.path.exists(folder):
            os.mkdir(folder)
        locker = open(os.path.join(folder, 'cache.lock'), 'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        disk_storage = shelve.open(
            os.path.join(folder, 'cache.shelve'))
        try:
            for key, value in disk_storage.items():
                if isinstance(value, dict):
                    disk['hits'] = value['hit_total'] - value['misses']
                    disk['misses'] = value['misses']
                    try:
                        disk['ratio'] = disk['hits'] * 100 / value['hit_total']
                    except (KeyError, ZeroDivisionError):
                        disk['ratio'] = 0
                else:
                    if hp:
                        disk['bytes'] += hp.iso(value[1]).size
                        disk['objects'] += hp.iso(value[1]).count
                    disk['entries'] += 1
                    if value[0] < disk['oldest']:
                        disk['oldest'] = value[0]
                    disk['keys'].append((key, GetInHMS(time.time() - value[0])))
        finally:
            portalocker.unlock(locker)
            locker.close()
            disk_storage.close()
    # Combine RAM and disk figures for the totals table.
    total['entries'] = ram['entries'] + disk['entries']
    total['bytes'] = ram['bytes'] + disk['bytes']
    total['objects'] = ram['objects'] + disk['objects']
    total['hits'] = ram['hits'] + disk['hits']
    total['misses'] = ram['misses'] + disk['misses']
    total['keys'] = ram['keys'] + disk['keys']
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] +
                                                total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0
    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']
    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])
    def key_table(keys):
        # Render the (key, age) pairs as an HTML table for the view.
        return TABLE(
            TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
            *[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
            **dict(_class='cache-keys',
                   _style="border-collapse: separate; border-spacing: .5em;"))
    if not is_gae:
        ram['keys'] = key_table(ram['keys'])
        disk['keys'] = key_table(disk['keys'])
        total['keys'] = key_table(total['keys'])
    return dict(form=form, total=total,
                ram=ram, disk=disk, object_stats=hp != False)
def table_template(table):
    """Render one table as a Graphviz HTML-like node label.

    Produces the ``< <TABLE>...</TABLE> >`` string Graphviz expects for
    ``shape='plaintext'`` nodes: a header row with the table name and one
    row per field (name, type, and a pk/fk/length marker).
    """
    from gluon.html import TR, TD, TABLE, TAG
    def FONT(*args, **kwargs):
        return TAG.font(*args, **kwargs)
    def types(field):
        # Third-column marker: pk/fk badge, string length, or blank.
        f_type = field.type
        if not isinstance(f_type,str):
            return ' '
        elif f_type == 'string':
            return field.length
        elif f_type == 'id':
            return B('pk')
        elif f_type.startswith('reference') or \
                f_type.startswith('list:reference'):
            return B('fk')
        else:
            return ' '
    # This is horrible HTML, but the only kind Graphviz understands.
    rows = []
    cellpadding = 4
    color = "#000000"
    bgcolor = "#FFFFFF"
    face = "Helvetica"
    face_bold = "Helvetica Bold"
    border = 0
    rows.append(TR(TD(FONT(table, _face=face_bold, _color=bgcolor),
                      _colspan=3, _cellpadding=cellpadding,
                      _align="center", _bgcolor=color)))
    for row in db[table]:
        rows.append(TR(TD(FONT(row.name, _color=color, _face=face_bold),
                          _align="left", _cellpadding=cellpadding,
                          _border=border),
                       TD(FONT(row.type, _color=color, _face=face),
                          _align="left", _cellpadding=cellpadding,
                          _border=border),
                       TD(FONT(types(row), _color=color, _face=face),
                          _align="center", _cellpadding=cellpadding,
                          _border=border)))
    return "< %s >" % TABLE(*rows, **dict(_bgcolor=bgcolor, _border=1,
                                          _cellborder=0, _cellspacing=0)
                            ).xml()
def bg_graph_model():
    """Draw the database model as a graph with pygraphviz.

    Tables become plaintext nodes (see table_template), tables sharing a
    ``_meta_graphmodel`` group are clustered, and reference fields become
    edges. Returns a PNG by default, or the format named by the first URL
    arg ('dot' returns the graph source) as a download.
    """
    graph = pgv.AGraph(layout='dot', directed=True, strict=False, rankdir='LR')
    subgraphs = dict()
    for tablename in db.tables:
        if hasattr(db[tablename],'_meta_graphmodel'):
            meta_graphmodel = db[tablename]._meta_graphmodel
        else:
            meta_graphmodel = dict(group='Undefined', color='#ECECEC')
        group = meta_graphmodel['group'].replace(' ', '')
        if not subgraphs.has_key(group):
            subgraphs[group] = dict(meta=meta_graphmodel, tables=[])
            subgraphs[group]['tables'].append(tablename)
        else:
            subgraphs[group]['tables'].append(tablename)
        graph.add_node(tablename, name=tablename, shape='plaintext',
                       label=table_template(tablename))
    # One Graphviz cluster per model group.
    for n, key in enumerate(subgraphs.iterkeys()):
        graph.subgraph(nbunch=subgraphs[key]['tables'],
                       name='cluster%d' % n,
                       style='filled',
                       color=subgraphs[key]['meta']['color'],
                       label=subgraphs[key]['meta']['group'])
    # Add an edge for every (list:)reference field to its target table.
    for tablename in db.tables:
        for field in db[tablename]:
            f_type = field.type
            if isinstance(f_type,str) and (
                f_type.startswith('reference') or
                f_type.startswith('list:reference')):
                referenced_table = f_type.split()[1].split('.')[0]
                n1 = graph.get_node(tablename)
                n2 = graph.get_node(referenced_table)
                graph.add_edge(n1, n2, color="#4C4C4C", label='')
    graph.layout()
    if not request.args:
        response.headers['Content-Type'] = 'image/png'
        return graph.draw(format='png', prog='dot')
    else:
        response.headers['Content-Disposition']='attachment;filename=graph.%s'%request.args(0)
        if request.args(0) == 'dot':
            return graph.string()
        else:
            return graph.draw(format=request.args(0), prog='dot')
def graph_model():
    """Model-graph page; `pgv` is None when pygraphviz is unavailable."""
    return {'databases': databases, 'pgv': pgv}
def manage():
    """Role-gated management console built on SQLFORM.smartgrid.

    The action (first URL arg) selects an entry of
    ``auth.settings.manager_actions``; the special 'auth' action manages
    users/roles/permissions. Non-.load requests return the page shell;
    .load requests return the grid for the table indexed by the second
    URL arg.
    """
    tables = manager_action['tables']
    if isinstance(tables[0], str):
        # Table names were given as strings: resolve them against the DAL.
        db = manager_action.get('db', auth.db)
        db = globals()[db] if isinstance(db, str) else db
        tables = [db[table] for table in tables]
    if request.args(0) == 'auth':
        auth.table_user()._plural = T('Users')
        auth.table_group()._plural = T('Roles')
        auth.table_membership()._plural = T('Memberships')
        auth.table_permission()._plural = T('Permissions')
    if request.extension != 'load':
        # Page shell: tab labels only; grids are loaded via ajax components.
        return dict(heading=manager_action.get('heading',
                    T('Manage %(action)s') % dict(action=request.args(0).replace('_', ' ').title())),
                    tablenames=[table._tablename for table in tables],
                    labels=[table._plural.title() for table in tables])
    table = tables[request.args(1, cast=int)]
    formname = '%s_grid' % table._tablename
    linked_tables = orderby = None
    if request.args(0) == 'auth':
        # Cosmetic tweaks for the built-in Auth tables.
        auth.table_group()._id.readable = \
            auth.table_membership()._id.readable = \
            auth.table_permission()._id.readable = False
        auth.table_membership().user_id.label = T('User')
        auth.table_membership().group_id.label = T('Role')
        auth.table_permission().group_id.label = T('Role')
        auth.table_permission().name.label = T('Permission')
        if table == auth.table_user():
            linked_tables=[auth.settings.table_membership_name]
        elif table == auth.table_group():
            orderby = 'role' if not request.args(3) or '.group_id' not in request.args(3) else None
        elif table == auth.table_permission():
            orderby = 'group_id'
    kwargs = dict(user_signature=True, maxtextlength=1000,
                  orderby=orderby, linked_tables=linked_tables)
    # Per-action smartgrid overrides: 'DEFAULT' first, then per-table.
    smartgrid_args = manager_action.get('smartgrid_args', {})
    kwargs.update(**smartgrid_args.get('DEFAULT', {}))
    kwargs.update(**smartgrid_args.get(table._tablename, {}))
    grid = SQLFORM.smartgrid(table, args=request.args[:2], formname=formname, **kwargs)
    return grid
| 0.002753 |
# -*- coding:utf-8 -*-
import time
import json
import pickle
import codecs
from parameters import Parameters
from store import Store
import os
from random import shuffle
__author__ = 'gree-gorey'
# Module-level singleton holding all list-generation state.
store = Store()
# Directory containing this script; input/output files live here.
path = os.path.dirname(os.path.realpath(__file__))
def set_parameters():
    """Load client parameters from lists_parameters.json and prepare the store.

    Depending on the requested number of lists (1 or 2) this filters the
    word data, sets up balance counters, normalizes features, and
    configures the differing/equal feature constraints.
    """
    global store
    # read the data table from disk
    store.read_data(path)
    store.parameters = Parameters()
    with codecs.open(u'lists_parameters.json', u'r', u'utf-8') as f:
        parameters_from_client = json.load(f)
    if int(parameters_from_client['n']) == 1:
        # set the number of lists
        store.lists_number = 1
        # simply filter down to the words within the user-given bounds
        store.lists['list_1'] = store.create_list_from_to_choose(parameters_from_client['list1'])
        # build balance-counter hashes for the parameters the user selected
        store.list_equality_counter['list_1'] = store.create_equality_counter(parameters_from_client['list1'])
        # print store.list_equality_counter
        # shuffle the list
        shuffle(store.lists['list_1'])
    elif int(parameters_from_client['n']) == 2:
        # create the preliminary lists in the store
        store.lists['list_1'] = store.create_list_from_to_choose(parameters_from_client['list1'])
        store.lists['list_2'] = store.create_list_from_to_choose(parameters_from_client['list2'])
        if len(store.lists['list_1']) == 0 or len(store.lists['list_2']) == 0:
            # NOTE(review): dead assignment -- this failure status is never
            # returned or stored, so empty lists proceed anyway; confirm intent.
            result = 'failure'
        # build balance-counter hashes for the parameters the user selected
        store.list_equality_counter['list_1'] = store.create_equality_counter(parameters_from_client['list1'])
        store.list_equality_counter['list_2'] = store.create_equality_counter(parameters_from_client['list2'])
        # normalize every feature to a 0-1 scale
        store.normalize()
        # print store.first_list[0].normalized_features
        # check whether the lists must differ on a feature; if so, differentiate
        if parameters_from_client['differ_feature'] != 'question':
            store.key_for_differ_feature = parameters_from_client['differ_feature']
            store.which_higher = parameters_from_client['which_is_higher']
            store.differentiate()
        # print len(store.lists['list_1'])
        # print store.second_list[0].name
        # set the differing feature
        store.parameters.differ = parameters_from_client['differ_feature']
        # set the Bonferroni-correction flag
        store.parameters.bonferroni = parameters_from_client['bonferroni']
        # build the vector of features that must stay equal
        store.parameters.same = parameters_from_client['same_features']
        # print store.same
        # if the lists turned out identical, split them randomly
        store.split()
def create():
    """Read stat_parameters.json and run the list generation.

    Generates one or two lists depending on store.lists_number and, on
    success, packages the results into a zip archive.
    """
    with codecs.open(u'stat_parameters.json', u'r', u'utf-8') as f:
        parameters_from_client = json.load(f)
    if store.lists_number == 1:
        store.parameters.length = int(parameters_from_client['length'])
        # the actual single-list generation
        store.generate_one()
        if store.success:
            # write the output files and pack them into an archive
            store.create_zip()
    else:
        store.parameters.length = int(parameters_from_client['length'])
        store.parameters.statistics = parameters_from_client['statistics']
        # apply the configured parameters
        store.setup_parameters()
        # start the timer
        store.time_begin = time.time()
        # the actual generation of both lists
        store.generate()
        # print store.first_list_equality_counter
        if store.success:
            # write the output files and pack them into an archive
            store.create_zip()
if __name__ == '__main__':
    # CLI entry point: configure from the JSON parameter files, then generate.
    set_parameters()
    create()
| 0.002855 |
from django.db import models
from django.utils import timezone
class Hcmeta(models.Model):
    """Read-only mapping of the externally-managed ``_hcmeta`` table.

    ``managed = False`` means Django never creates or migrates it.
    NOTE(review): the underscore-prefixed tables here (_hcmeta,
    _sf_event_log, _trigger_log*) look like Heroku Connect bookkeeping
    tables -- confirm.
    """
    hcver = models.IntegerField(blank=True, null=True)
    org_id = models.CharField(max_length=50, blank=True, null=True)
    details = models.TextField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = '_hcmeta'
class SfEventLog(models.Model):
    """Read-only mapping of the externally-managed ``_sf_event_log`` table.

    One row per sync event: the affected table, the action taken, local
    and remote timestamps, the Salesforce id, and the raw record payload.
    """
    table_name = models.CharField(max_length=128, blank=True, null=True)
    action = models.CharField(max_length=7, blank=True, null=True)
    synced_at = models.DateTimeField(blank=True, null=True)
    sf_timestamp = models.DateTimeField(blank=True, null=True)
    sfid = models.CharField(max_length=20, blank=True, null=True)
    record = models.TextField(blank=True, null=True)
    processed = models.BooleanField(null=True)
    class Meta:
        managed = False
        db_table = '_sf_event_log'
class TriggerLog(models.Model):
    """Read-only mapping of the externally-managed ``_trigger_log`` table.

    Queue of local row changes awaiting sync: transaction id, state,
    action, the affected table/row, old and new values, and the remote
    result/message once processed.
    """
    txid = models.BigIntegerField(blank=True, null=True)
    created_at = models.DateTimeField(blank=True, null=True)
    updated_at = models.DateTimeField(blank=True, null=True)
    processed_at = models.DateTimeField(blank=True, null=True)
    processed_tx = models.BigIntegerField(blank=True, null=True)
    state = models.CharField(max_length=8, blank=True, null=True)
    action = models.CharField(max_length=7, blank=True, null=True)
    table_name = models.CharField(max_length=128, blank=True, null=True)
    record_id = models.IntegerField(blank=True, null=True)
    sfid = models.CharField(max_length=18, blank=True, null=True)
    old = models.TextField(blank=True, null=True)
    values = models.TextField(blank=True, null=True)
    sf_result = models.IntegerField(blank=True, null=True)
    sf_message = models.TextField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = '_trigger_log'
class TriggerLogArchive(models.Model):
    """Read-only mapping of ``_trigger_log_archive``.

    Same columns as TriggerLog (with an explicit primary key), holding
    rows moved out of the live trigger log.
    """
    id = models.IntegerField(primary_key=True)
    txid = models.BigIntegerField(blank=True, null=True)
    created_at = models.DateTimeField(blank=True, null=True)
    updated_at = models.DateTimeField(blank=True, null=True)
    processed_at = models.DateTimeField(blank=True, null=True)
    processed_tx = models.BigIntegerField(blank=True, null=True)
    state = models.CharField(max_length=8, blank=True, null=True)
    action = models.CharField(max_length=7, blank=True, null=True)
    table_name = models.CharField(max_length=128, blank=True, null=True)
    record_id = models.IntegerField(blank=True, null=True)
    sfid = models.CharField(max_length=18, blank=True, null=True)
    old = models.TextField(blank=True, null=True)
    values = models.TextField(blank=True, null=True)
    sf_result = models.IntegerField(blank=True, null=True)
    sf_message = models.TextField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = '_trigger_log_archive'
class SiteAccount(models.Model):
    """Read-only mapping of the Salesforce `account` table.

    Columns with a `__c` db_column are Salesforce custom fields.  The row is
    identified externally by `sfid`; foreign keys in related models point at
    that column (`to_field='sfid'`).
    """
    jigsaw = models.CharField(max_length=20, blank=True, null=True)
    shippinglongitude = models.FloatField(blank=True, null=True)
    shippingstate = models.CharField(max_length=80, blank=True, null=True)
    youtubeid = models.CharField(db_column='youtubeid__c', max_length=80, blank=True, null=True)
    numberofemployees = models.IntegerField(blank=True, null=True)
    # Self-referencing FK: a site account may have a parent account.
    parent = models.ForeignKey('SiteAccount', to_field='sfid', db_column='parentid',
                               on_delete=models.CASCADE,
                               max_length=18, blank=True, null=True)
    recordtypeid = models.CharField(max_length=18, blank=True, null=True)
    shippingpostalcode = models.CharField(max_length=20, blank=True, null=True)
    billingcity = models.CharField(max_length=40, blank=True, null=True)
    billinglatitude = models.FloatField(blank=True, null=True)
    accountsource = models.CharField(max_length=40, blank=True, null=True)
    shippingcountry = models.CharField(max_length=80, blank=True, null=True)
    lastvieweddate = models.DateTimeField(blank=True, null=True)
    shippinggeocodeaccuracy = models.CharField(max_length=40, blank=True, null=True)
    last_el_update = models.DateTimeField(db_column='last_el_update__c', blank=True, null=True)
    name = models.CharField(max_length=255, blank=True, null=True)
    site_el_raised = models.FloatField(db_column='site_el_raised__c', blank=True, null=True)
    lastmodifieddate = models.DateTimeField(blank=True, null=True)
    phone = models.CharField(max_length=40, blank=True, null=True)
    masterrecordid = models.CharField(max_length=18, blank=True, null=True)
    ownerid = models.CharField(max_length=18, blank=True, null=True)
    isdeleted = models.BooleanField(null=True)
    site_el_goal = models.FloatField(db_column='site_el_goal__c', blank=True, null=True)
    systemmodstamp = models.DateTimeField(blank=True, null=True)
    el_id = models.CharField(db_column='el_id__c', max_length=80, blank=True, null=True)
    lastmodifiedbyid = models.CharField(max_length=18, blank=True, null=True)
    shippingstreet = models.CharField(max_length=255, blank=True, null=True)
    lastactivitydate = models.DateField(blank=True, null=True)
    billingpostalcode = models.CharField(max_length=20, blank=True, null=True)
    billinglongitude = models.FloatField(blank=True, null=True)
    twitchid = models.CharField(db_column='twitchid__c', max_length=80, blank=True, null=True)
    twitterid = models.CharField(db_column='twitterid__c', max_length=80, blank=True, null=True)
    createddate = models.DateTimeField(blank=True, null=True)
    billingstate = models.CharField(max_length=80, blank=True, null=True)
    supplies = models.TextField(db_column='supplies__c', blank=True, null=True)
    jigsawcompanyid = models.CharField(max_length=20, blank=True, null=True)
    shippingcity = models.CharField(max_length=40, blank=True, null=True)
    shippinglatitude = models.FloatField(blank=True, null=True)
    createdbyid = models.CharField(max_length=18, blank=True, null=True)
    type = models.CharField(max_length=40, blank=True, null=True)
    website = models.CharField(max_length=255, blank=True, null=True)
    billingcountry = models.CharField(max_length=80, blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    billinggeocodeaccuracy = models.CharField(max_length=40, blank=True, null=True)
    photourl = models.CharField(max_length=255, blank=True, null=True)
    lastreferenceddate = models.DateTimeField(blank=True, null=True)
    sicdesc = models.CharField(max_length=80, blank=True, null=True)
    industry = models.CharField(max_length=40, blank=True, null=True)
    billingstreet = models.CharField(max_length=255, blank=True, null=True)
    site_email = models.CharField(db_column='site_email__c', max_length=80, blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)
    site_info = models.TextField(db_column='site_info__c', blank=True, null=True)
    nerd_in_chief = models.CharField(db_column='nerd_in_chief__c', max_length=18, blank=True, null=True)
    mayedit = models.BooleanField(null=True)
    # contacturl = models.CharField(db_column='contacturl__c', max_length=1300, blank=True, null=True)
    islocked = models.BooleanField(null=True)
    loot_guard = models.CharField(db_column='loot_guard__c', max_length=18, blank=True, null=True)
    class Meta:
        # Table is owned by the Salesforce sync; Django must not migrate it.
        managed = False
        db_table = 'account'
    def has_events(self):
        """ Return True if this account has upcoming events """
        # .exists() lets the database stop at the first matching row instead
        # of counting every row (previously .count() > 0).
        return Event.objects.filter(event_start_date__gte=timezone.now(), site=self).exists()
    def upcoming(self):
        """Return this site's upcoming events, soonest first."""
        return self.events.filter(event_start_date__gte=timezone.now()).order_by('event_start_date').all()
    def past(self):
        """Return this site's past events, most recent first."""
        return self.events.filter(event_start_date__lt=timezone.now()).order_by('-event_start_date').all()
class Contact(models.Model):
    """Read-only mapping of the Salesforce `contact` table."""
    lastname = models.CharField(max_length=80, blank=True, null=True)
    account = models.ForeignKey(SiteAccount, to_field='sfid', db_column='accountid', on_delete=models.CASCADE,
                                max_length=18, blank=True, null=True)
    name = models.CharField(max_length=121, blank=True, null=True)
    ownerid = models.CharField(max_length=18, blank=True, null=True)
    department = models.CharField(max_length=80, blank=True, null=True)
    # Extra Life participant id; unique when present.
    extra_life_id = models.CharField(db_column='extra_life_id__c', unique=True, max_length=20, blank=True, null=True)
    fragforce_org_user = models.CharField(db_column='fragforce_org_user__c', max_length=18, blank=True, null=True)
    title = models.CharField(max_length=128, blank=True, null=True)
    firstname = models.CharField(max_length=40, blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)
    def donate_link(self):
        """Return the Extra Life donation URL for this contact.

        :raises ValueError: if ``extra_life_id`` is unset/empty, or (via
            ``int()``) if it is set but not numeric.
        """
        if self.extra_life_id:
            return "https://www.extra-life.org/index.cfm?fuseaction=donate.participant&participantID=%d" % (
                int(self.extra_life_id),
            )
        raise ValueError("No extra life id set for %r" % self)
    class Meta:
        # Table is owned by the Salesforce sync; Django must not migrate it.
        managed = False
        db_table = 'contact'
class ELHistory(models.Model):
    """Read-only mapping of the `el_history__c` Salesforce custom object.

    Holds per-year Extra Life figures (``raised`` vs ``goal``) tied to a
    Contact and a SiteAccount.
    """
    currencyisocode = models.CharField(max_length=3, blank=True, null=True)
    contact = models.ForeignKey(Contact, to_field='sfid', db_column='contact__c', on_delete=models.CASCADE,
                                max_length=18, blank=True, null=True)
    year = models.CharField(db_column='year__c', max_length=255, blank=True, null=True)
    name = models.CharField(max_length=80, blank=True, null=True)
    raised = models.FloatField(db_column='raised__c', blank=True, null=True)
    lastmodifieddate = models.DateTimeField(blank=True, null=True)
    ownerid = models.CharField(max_length=18, blank=True, null=True)
    mayedit = models.BooleanField(null=True)
    isdeleted = models.BooleanField(null=True)
    goal = models.FloatField(db_column='goal__c', blank=True, null=True)
    systemmodstamp = models.DateTimeField(blank=True, null=True)
    el_id = models.CharField(db_column='el_id__c', max_length=7, blank=True, null=True)
    lastmodifiedbyid = models.CharField(max_length=18, blank=True, null=True)
    islocked = models.BooleanField(null=True)
    createddate = models.DateTimeField(blank=True, null=True)
    createdbyid = models.CharField(max_length=18, blank=True, null=True)
    site = models.ForeignKey(SiteAccount, to_field='sfid', db_column='site__c', on_delete=models.CASCADE, max_length=18,
                             blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)
    class Meta:
        # Table is owned by the Salesforce sync; Django must not migrate it.
        managed = False
        db_table = 'el_history__c'
class Event(models.Model):
    """Read-only mapping of the `fragforce_event__c` Salesforce custom object.

    Reverse accessor from SiteAccount is ``site.events`` (related_name below).
    Several formula/rollup columns are intentionally commented out.
    """
    lastvieweddate = models.DateTimeField(blank=True, null=True)
    volunteerforce_link = models.CharField(db_column='volunteerforce_link__c', max_length=255, blank=True, null=True)
    name = models.CharField(max_length=80, blank=True, null=True)
    event_end_date = models.DateTimeField(db_column='event_end_date__c', blank=True, null=True)
    lastmodifieddate = models.DateTimeField(blank=True, null=True)
    isdeleted = models.BooleanField(null=True)
    systemmodstamp = models.DateTimeField(blank=True, null=True)
    lastmodifiedbyid = models.CharField(max_length=18, blank=True, null=True)
    lastactivitydate = models.DateField(blank=True, null=True)
    event_start_date = models.DateTimeField(db_column='event_start_date__c', blank=True, null=True)
    createddate = models.DateTimeField(blank=True, null=True)
    createdbyid = models.CharField(max_length=18, blank=True, null=True)
    site = models.ForeignKey(SiteAccount, to_field='sfid', db_column='site__c', on_delete=models.CASCADE, max_length=18,
                             blank=True, null=True, related_name='events')
    lastreferenceddate = models.DateTimeField(blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)
    use_secondary_address = models.BooleanField(db_column='use_secondary_address__c', null=True)
    stream_recording_link = models.CharField(db_column='stream_recording_link__c', max_length=255, blank=True,
                                             null=True)
    # participant_count = models.FloatField(db_column='participant_count__c', blank=True, null=True)
    # prereg_url = models.CharField(db_column='prereg_url__c', max_length=1300, blank=True, null=True)
    mayedit = models.BooleanField(null=True)
    # open_for_preregistration = models.BooleanField(db_column='open_for_preregistration__c', null=True)
    islocked = models.BooleanField(null=True)
    # signinurl = models.CharField(db_column='signinurl__c', max_length=1300, blank=True, null=True)
    # event_address_lookup = models.CharField(db_column='event_address_lookup__c', max_length=1300, blank=True, null=True)
    event_information = models.TextField(db_column='event_information__c', blank=True, null=True)
    # open_for_registration = models.BooleanField(db_column='open_for_registration__c', null=True)
    # Short description of the event
    description = models.TextField(db_column='description__c', blank=True, null=True)
    class Meta:
        # Table is owned by the Salesforce sync; Django must not migrate it.
        managed = False
        db_table = 'fragforce_event__c'
class EventParticipant(models.Model):
    """Read-only mapping of the `event_participant__c` join object linking a
    Contact to an Event.

    NOTE(review): `name` used to be declared twice; only the second
    declaration (db_column='name__c') ever took effect because the later
    class-body assignment overwrote the first, so the dead first declaration
    has been removed.  Behavior is unchanged.
    """
    contact = models.ForeignKey(Contact, to_field='sfid', db_column='contact__c', on_delete=models.CASCADE,
                                max_length=18, blank=True, null=True)
    lastvieweddate = models.DateTimeField(blank=True, null=True)
    lastmodifieddate = models.DateTimeField(blank=True, null=True)
    ownerid = models.CharField(max_length=18, blank=True, null=True)
    mayedit = models.BooleanField(null=True)
    event = models.ForeignKey(Event, to_field='sfid', db_column='fragforce_event__c', on_delete=models.CASCADE,
                              max_length=18, blank=True,
                              null=True)
    isdeleted = models.BooleanField(null=True)
    participant = models.BooleanField(db_column='participant__c', null=True)
    systemmodstamp = models.DateTimeField(blank=True, null=True)
    lastmodifiedbyid = models.CharField(max_length=18, blank=True, null=True)
    lastactivitydate = models.DateField(blank=True, null=True)
    islocked = models.BooleanField(null=True)
    createddate = models.DateTimeField(blank=True, null=True)
    # Maps to the custom name__c column (not the standard Salesforce Name).
    name = models.CharField(db_column='name__c', max_length=120, blank=True, null=True)
    createdbyid = models.CharField(max_length=18, blank=True, null=True)
    lastreferenceddate = models.DateTimeField(blank=True, null=True)
    sfid = models.CharField(unique=True, max_length=18, blank=True, null=True)
    field_hc_lastop = models.CharField(db_column='_hc_lastop', max_length=32, blank=True, null=True)
    field_hc_err = models.TextField(db_column='_hc_err', blank=True, null=True)
    class Meta:
        # Table is owned by the Salesforce sync; Django must not migrate it.
        managed = False
        db_table = 'event_participant__c'
| 0.003049 |
import numpy as np
from pandas._libs.tslibs.fields import (
get_date_field,
get_start_end_field,
get_timedelta_field,
)
from .tslib import _sizes
class TimeGetTimedeltaField:
    # ASV benchmark: time pandas' internal get_timedelta_field across array
    # sizes (from tslib._sizes) and every supported field alias.
    params = [
        _sizes,
        ["days", "h", "s", "seconds", "ms", "microseconds", "us", "ns", "nanoseconds"],
    ]
    param_names = ["size", "field"]
    def setup(self, size, field):
        # Small random i8 values stand in for timedelta64 ordinals; only the
        # field-extraction speed is measured, so the values don't matter.
        arr = np.random.randint(0, 10, size=size, dtype="i8")
        self.i8data = arr
    def time_get_timedelta_field(self, size, field):
        get_timedelta_field(self.i8data, field)
class TimeGetDateField:
    # ASV benchmark: time pandas' internal get_date_field across array sizes
    # and every supported field code (year/month/day/..., day-of-year, etc.).
    params = [
        _sizes,
        [
            "Y",
            "M",
            "D",
            "h",
            "m",
            "s",
            "us",
            "ns",
            "doy",
            "dow",
            "woy",
            "q",
            "dim",
            "is_leap_year",
        ],
    ]
    param_names = ["size", "field"]
    def setup(self, size, field):
        # Small random i8 values stand in for datetime64 ordinals; only the
        # field-extraction speed is measured.
        arr = np.random.randint(0, 10, size=size, dtype="i8")
        self.i8data = arr
    def time_get_date_field(self, size, field):
        get_date_field(self.i8data, field)
class TimeGetStartEndField:
    # ASV benchmark: time get_start_end_field (is_{month,quarter,year}_{start,end})
    # for several frequency strings and month anchors.
    params = [
        _sizes,
        ["start", "end"],
        ["month", "quarter", "year"],
        ["B", None, "QS"],
        [12, 3, 5],
    ]
    param_names = ["size", "side", "period", "freqstr", "month_kw"]
    def setup(self, size, side, period, freqstr, month_kw):
        arr = np.random.randint(0, 10, size=size, dtype="i8")
        self.i8data = arr
        # e.g. "is_month_start" -- the attribute get_start_end_field computes.
        self.attrname = f"is_{period}_{side}"
    def time_get_start_end_field(self, size, side, period, freqstr, month_kw):
        get_start_end_field(self.i8data, self.attrname, freqstr, month_kw=month_kw)
| 0.001125 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.resource.requests import TaskResourceRequest, TaskResourceRequests, \
ExecutorResourceRequests, ExecutorResourceRequest
class ResourceProfile(object):
    """
    Resource profile to associate with an RDD. A :class:`pyspark.resource.ResourceProfile`
    allows the user to specify executor and task requirements for an RDD that will get
    applied during a stage. This allows the user to change the resource requirements between
    stages. This is meant to be immutable so user cannot change it after building.
    .. versionadded:: 3.1.0
    Notes
    -----
    This API is evolving.
    """
    def __init__(self, _java_resource_profile=None, _exec_req=None, _task_req=None):
        # Two modes: wrap an existing JVM-side ResourceProfile, or (no JVM
        # profile yet) hold the plain-Python request dicts locally.
        if _java_resource_profile is not None:
            self._java_resource_profile = _java_resource_profile
        else:
            self._java_resource_profile = None
            self._executor_resource_requests = _exec_req or {}
            self._task_resource_requests = _task_req or {}
    @property
    def id(self):
        """The JVM-assigned unique id of this profile.

        Raises RuntimeError when no JVM-side profile exists yet (i.e. the
        profile has not been attached to an RDD under a live SparkContext).
        """
        if self._java_resource_profile is not None:
            return self._java_resource_profile.id()
        else:
            raise RuntimeError("SparkContext must be created to get the id, get the id "
                               "after adding the ResourceProfile to an RDD")
    @property
    def taskResources(self):
        """Dict of resource name -> TaskResourceRequest for this profile."""
        if self._java_resource_profile is not None:
            # Convert the JVM map into plain Python request objects.
            taskRes = self._java_resource_profile.taskResourcesJMap()
            result = {}
            for k, v in taskRes.items():
                result[k] = TaskResourceRequest(v.resourceName(), v.amount())
            return result
        else:
            return self._task_resource_requests
    @property
    def executorResources(self):
        """Dict of resource name -> ExecutorResourceRequest for this profile."""
        if self._java_resource_profile is not None:
            # Convert the JVM map into plain Python request objects.
            execRes = self._java_resource_profile.executorResourcesJMap()
            result = {}
            for k, v in execRes.items():
                result[k] = ExecutorResourceRequest(v.resourceName(), v.amount(),
                                                    v.discoveryScript(), v.vendor())
            return result
        else:
            return self._executor_resource_requests
class ResourceProfileBuilder(object):
    """
    Resource profile Builder to build a resource profile to associate with an RDD.
    A ResourceProfile allows the user to specify executor and task requirements for
    an RDD that will get applied during a stage. This allows the user to change the
    resource requirements between stages.
    .. versionadded:: 3.1.0
    Notes
    -----
    This API is evolving.
    """
    def __init__(self):
        from pyspark.context import SparkContext
        _jvm = SparkContext._jvm
        # Mirror of ResourceProfile: delegate to a JVM-side builder when a
        # SparkContext (and hence a JVM) exists, otherwise accumulate the
        # requests in plain Python dicts until one does.
        if _jvm is not None:
            self._jvm = _jvm
            self._java_resource_profile_builder = \
                _jvm.org.apache.spark.resource.ResourceProfileBuilder()
        else:
            self._jvm = None
            self._java_resource_profile_builder = None
            self._executor_resource_requests = {}
            self._task_resource_requests = {}
    def require(self, resourceRequest):
        """Add task or executor resource requests; returns self for chaining."""
        if isinstance(resourceRequest, TaskResourceRequests):
            if self._java_resource_profile_builder is not None:
                # If the requests were built without a JVM, rebuild them
                # against this JVM before handing them to the Java builder.
                if resourceRequest._java_task_resource_requests is not None:
                    self._java_resource_profile_builder.require(
                        resourceRequest._java_task_resource_requests)
                else:
                    taskReqs = TaskResourceRequests(self._jvm, resourceRequest.requests)
                    self._java_resource_profile_builder.require(
                        taskReqs._java_task_resource_requests)
            else:
                self._task_resource_requests.update(resourceRequest.requests)
        else:
            # Anything that isn't a TaskResourceRequests is treated as
            # ExecutorResourceRequests.
            if self._java_resource_profile_builder is not None:
                if resourceRequest._java_executor_resource_requests is not None:
                    self._java_resource_profile_builder.require(
                        resourceRequest._java_executor_resource_requests)
                else:
                    execReqs = ExecutorResourceRequests(self._jvm, resourceRequest.requests)
                    self._java_resource_profile_builder.require(
                        execReqs._java_executor_resource_requests)
            else:
                self._executor_resource_requests.update(resourceRequest.requests)
        return self
    def clearExecutorResourceRequests(self):
        """Drop all executor resource requests accumulated so far."""
        if self._java_resource_profile_builder is not None:
            self._java_resource_profile_builder.clearExecutorResourceRequests()
        else:
            self._executor_resource_requests = {}
    def clearTaskResourceRequests(self):
        """Drop all task resource requests accumulated so far."""
        if self._java_resource_profile_builder is not None:
            self._java_resource_profile_builder.clearTaskResourceRequests()
        else:
            self._task_resource_requests = {}
    @property
    def taskResources(self):
        """Dict of resource name -> TaskResourceRequest accumulated so far."""
        if self._java_resource_profile_builder is not None:
            taskRes = self._java_resource_profile_builder.taskResourcesJMap()
            result = {}
            for k, v in taskRes.items():
                result[k] = TaskResourceRequest(v.resourceName(), v.amount())
            return result
        else:
            return self._task_resource_requests
    @property
    def executorResources(self):
        """Dict of resource name -> ExecutorResourceRequest accumulated so far."""
        if self._java_resource_profile_builder is not None:
            result = {}
            execRes = self._java_resource_profile_builder.executorResourcesJMap()
            for k, v in execRes.items():
                result[k] = ExecutorResourceRequest(v.resourceName(), v.amount(),
                                                    v.discoveryScript(), v.vendor())
            return result
        else:
            return self._executor_resource_requests
    @property
    def build(self):
        """Build an immutable ResourceProfile from the accumulated requests.

        NOTE: this is deliberately a property -- callers write
        ``builder.build`` with no parentheses.
        """
        if self._java_resource_profile_builder is not None:
            jresourceProfile = self._java_resource_profile_builder.build()
            return ResourceProfile(_java_resource_profile=jresourceProfile)
        else:
            return ResourceProfile(_exec_req=self._executor_resource_requests,
                                   _task_req=self._task_resource_requests)
| 0.002671 |
import tempfile, shutil
import os
import re
import subprocess
import time
import datetime
import csv
import json, yaml
import string
from bson.objectid import ObjectId
from bson import json_util
from dateutil.parser import parse
from django.conf import settings
from hashlib import md5
from crits.core.class_mapper import class_from_value
from crits.core.exceptions import ZipFileError
from crits.core.mongo_tools import get_file
def get_file_fs(sample_md5):
    """
    Read a file from the filesystem. The path to the file is:
    /data/files/<md5[:2]>/<md5[2:4]>/<md5>
    :param sample_md5: The MD5 of the file to read off of disk.
    :type sample_md5: str
    :returns: str
    :raises Exception: if the file cannot be opened or read.
    """
    path = '/data/files/%s/%s/%s' % (sample_md5[:2],
                                     sample_md5[2:4],
                                     sample_md5)
    try:
        # 'with' guarantees the handle is closed even if read() fails
        # (the old code leaked the handle on a read error).
        with open(path, 'rb') as fin:
            return fin.read()
    except Exception as e:
        # BUG FIX: the old code did raise("error: %s" % e), which tries to
        # raise a plain string -- a TypeError that masked the real error.
        raise Exception("error: %s" % e)
def put_file_fs(data):
    """
    Write a file to the filesystem. The path to write the file to is:
    /data/files/<md5[:2]>/<md5[2:4]>/<md5>
    :param data: The data of the file to write.
    :type data: str
    :returns: str (the md5 of the file written)
    :raises Exception: if the file cannot be written.
    """
    hasher = md5()
    hasher.update(data)
    sample_md5 = hasher.hexdigest()
    path = '/data/files/%s/%s/%s' % (sample_md5[:2],
                                     sample_md5[2:4],
                                     sample_md5)
    try:
        # 'with' guarantees the handle is closed even if write() fails.
        with open(path, 'wb') as fout:
            fout.write(data)
    except Exception as e:
        # BUG FIX: the old code did raise("error: %s" % e), which tries to
        # raise a plain string -- a TypeError that masked the real error.
        raise Exception("error: %s" % e)
    return sample_md5
def create_zip(files, pw_protect=True):
    """
    Create a zip file. Creates a temporary directory to write files to on disk
    using :class:`tempfile`. Uses /usr/bin/zip as the zipping mechanism
    currently. Will password protect the zip file as a default. The password for
    the zip file defaults to "infected", but it can be changed in the config
    under zip7_password.
    :param files: The files to add to the zip file.
    :type files: list of files which are in the format of a list or tuple of
                 (<filename>, <data>).
    :param pw_protect: To password protect the zip file or not.
    :type pw_protect: boolean
    :returns: str (zip archive bytes)
    :raises: :class:`crits.core.exceptions.ZipFileError`
    """
    dumpdir = ""
    try:
        # Zip can take data from stdin to compress, but you can't define the
        # filenames within the archive (they show up as "-"), so we write the
        # files to a temp directory and compress that.
        from crits.config.config import CRITsConfig
        crits_config = CRITsConfig.objects().first()
        if crits_config:
            zip7_password = crits_config.zip7_password or 'infected'
        else:
            zip7_password = settings.ZIP7_PASSWORD or 'infected'
        # NOTE: mkdtemp() without dir= -- passing a dir caused "permission
        # denied" errors previously.
        dumpdir = tempfile.mkdtemp()
        # Write out the binary files.
        for f in files:
            filename = f[0]
            file_data = f[1]
            # Make sure our desired path doesn't already exist (some files
            # may have the same name but different data).
            path = dumpdir + "/" + filename.encode("utf-8")
            i = 1
            tmp = path
            while os.path.exists(tmp):
                tmp = path + "(" + str(i) + ")"
                i += 1
            with open(tmp, "wb") as fh:
                fh.write(file_data)
        # Build the command line for zip.
        # NOTE: forking subprocess instead of using Python's ZipFile library
        # because ZipFile does not allow us to create password-protected zip
        # archives, only read them.
        # -r recurse into the dump dir, -j don't include the original filepath
        zipname = "zip.zip"  # The name we give it doesn't really matter
        # BUG FIX: the archive path and source directory used to be appended
        # both before the -P option and again after it, so zip received its
        # own output archive as an input member.  Append them exactly once,
        # after any options.
        args = ["/usr/bin/zip", "-r", "-j"]
        if pw_protect:
            args += ["-P", zip7_password]
        args += [dumpdir + "/" + zipname, dumpdir]
        proc = subprocess.Popen(args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        # Give the process 30 seconds to complete, otherwise kill it.
        waitSeconds = 30
        while (proc.poll() is None and waitSeconds):
            time.sleep(1)
            waitSeconds -= 1
        zipdata = ""
        if proc.returncode:  # zip spit out an error
            errmsg = "Error while creating archive\n" + proc.stdout.read()
            raise ZipFileError(errmsg)
        elif not waitSeconds:  # Process timed out
            proc.terminate()
            raise ZipFileError("Error:\nProcess failed to terminate")
        else:
            with open(dumpdir + "/" + zipname, "rb") as fh:
                zipdata = fh.read()
        if not len(zipdata):
            raise ZipFileError("Error:\nThe zip archive contains no data")
        return zipdata
    except ZipFileError:
        raise
    except Exception as ex:
        errmsg = ""
        for err in ex.args:
            errmsg = errmsg + " " + unicode(err)
        raise ZipFileError(errmsg)
    finally:
        # Always clean up the temp directory, success or failure.
        if os.path.isdir(dumpdir):
            shutil.rmtree(dumpdir)
def format_file(data, file_format):
    """
    Format data into the provided format. Acceptable formats are:
    - base64
    - zlib
    - raw
    - invert
    Any unrecognized format is treated like "raw" (data returned unchanged
    with an empty extension).  The previous implementation raised a
    NameError for unknown formats because `ext` was never assigned.
    :param data: The data to format.
    :type data: str
    :param file_format: The format to convert the data into.
    :type file_format: str
    :returns: tuple of (<formatted_data>, <file_extension>)
    """
    if data is None:
        return ("", "")
    ext = ""  # default: raw / unknown formats keep the data untouched
    if file_format == "base64":
        import base64
        data = base64.b64encode(data)
        ext = ".b64"
    elif file_format == "zlib":
        import zlib
        data = zlib.compress(data)
        ext = ".Z"
    elif file_format == "invert":
        # XOR every byte with 0xff.
        data = ''.join([chr(ord(c) ^ 0xff) for c in data])
        ext = ".ff"
    return (data, ext)
def convert_datetimes_to_string(obj):
    """
    Recursively replace every datetime found in ``obj`` with its string form
    (settings.PY_DATETIME_FORMAT).
    Lists and dicts are walked in place; any non-datetime leaf is returned
    unchanged.
    :param obj: The value to convert.
    :type obj: datetime.datetime, list, dict, or anything else
    :returns: obj
    """
    if isinstance(obj, datetime.datetime):
        return datetime.datetime.strftime(obj, settings.PY_DATETIME_FORMAT)
    if isinstance(obj, list):
        for i in range(len(obj)):
            obj[i] = convert_datetimes_to_string(obj[i])
    elif isinstance(obj, dict):
        for key in obj.keys():
            obj[key] = convert_datetimes_to_string(obj[key])
    return obj
def convert_string_to_bool(value):
    """
    Interpret the strings "True"/"true" (or the boolean True itself) as True;
    everything else, including None, yields False.
    :param value: The value to interpret.
    :type value: str
    :returns: True, False
    """
    # Tuple membership uses == just like the original chained comparison,
    # so truthy equivalents of True (e.g. 1) still map to True.
    if value in (True, "True", "true"):
        return True
    return False
def format_object(obj_type, obj_id, data_format="yaml", cleanse=True,
                  obj_sources=None, remove_source=False, remove_rels=False,
                  remove_schema_version=False, remove_campaign=False,
                  remove_buckets=False, remove_releasability=False,
                  remove_unsupported=False):
    """
    Formats a top-level object for utilization in certain conditions. Removes
    CRITs-internal necessary data so users editing the document via the
    interface don't alter or have the ability to overwrite things they should
    not.
    :param obj_type: The CRITs type of the top-level object to format.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param data_format: The format of the returned data.
    :type data_format: str of "yaml" or "json"
    :param cleanse: Remove "to", "actions", "releasability", and "bucket_list"
                    if this is an Email or Indicator.
    :type cleanse: boolean
    :param obj_sources: The sources to overwrite into the document or to set
                        the source list to an empty list if remove_source is
                        False. Defaults to None, treated as an empty list.
    :type obj_sources: list
    :param remove_source: Remove the source key from the document.
    :type remove_source: boolean
    :param remove_rels: Remove the relationships and objects keys from the
                        document.
    :type remove_rels: boolean
    :param remove_schema_version: Remove the schema_version key from the
                                  document.
    :type remove_schema_version: boolean
    :param remove_campaign: Remove the campaign key from the document.
    :type remove_campaign: boolean
    :param remove_buckets: Remove the bucket_list key from the document.
    :type remove_buckets: boolean
    :param remove_releasability: Remove the releasability key from the document.
    :type remove_releasability: boolean
    :param remove_unsupported: Remove the unsupported_attrs key from the document.
    :type remove_unsupported: boolean
    :returns: str
    """
    # BUG FIX: the default used to be obj_sources=[], the classic shared
    # mutable-default pitfall; build a fresh list per call instead.
    if obj_sources is None:
        obj_sources = []
    collection = settings.CRITS_TYPES[obj_type]
    obj_class = class_from_value(obj_type, obj_id)
    if not obj_class:
        return ""
    data = obj_class.to_dict()
    if data is None:
        return ""
    # Emails use raw_header (singular) as the attribute but store it as
    # raw_headers (plural) in the database. When viewing an email in YAML
    # or JSON convert from plural to singular. This will allow a copy/paste
    # of these views to be imported correctly.
    if 'raw_headers' in data:
        data['raw_header'] = data['raw_headers']
        del data['raw_headers']
    if cleanse and collection in [settings.COL_EMAIL, settings.COL_INDICATORS]:
        for key in ("to", "actions", "releasability", "bucket_list"):
            if key in data:
                del data[key]
    if remove_source and 'source' in data:
        del data["source"]
    elif 'source' in data:
        data['source'] = obj_sources
    if remove_rels and 'relationships' in data:
        del data["relationships"]
    if remove_rels and 'objects' in data:
        del data["objects"]
    if remove_schema_version and 'schema_version' in data:
        del data["schema_version"]
    if remove_campaign and 'campaign' in data:
        del data["campaign"]
    del data["_id"]
    # BUG FIX: dict.has_key() is Python-2-only; 'in' works on both.
    if "modified" in data:
        del data["modified"]
    if remove_buckets and 'bucket_list' in data:
        del data['bucket_list']
    if remove_releasability and 'releasability' in data:
        del data['releasability']
    if remove_unsupported and 'unsupported_attrs' in data:
        del data['unsupported_attrs']
    data = json.dumps(convert_datetimes_to_string(data),
                      default=json_util.default)
    if data_format == "yaml":
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects. The input here is our own json.dumps output, so
        # it is safe today, but prefer yaml.safe_load if this ever changes.
        data = yaml.dump(yaml.load(data), default_flow_style=False)
    elif data_format == "json":
        data = json.dumps(json.loads(data))
    return data
def make_ascii_strings(md5=None, data=None):
    """
    Extract every run of 4+ printable ASCII characters from a blob.
    :param md5: The MD5 of the Sample to parse (fetched via get_file).
    :type md5: str
    :param data: The data to parse directly.
    :type data: str
    :returns: str -- a header, one string per line, then trailing newlines
    """
    if md5:
        data = get_file(md5)
    header = 'ASCII Strings\n' + "-" * 30 + "\n"
    # [ -~] covers the printable ASCII range (0x20-0x7e).
    found = re.findall('([ -~]{4,})', data)
    return header + '\n'.join(found) + "\n\n\n\n"
def make_unicode_strings(md5=None, data=None):
    """
    Extract runs of 4+ printable characters that are interleaved with NUL
    bytes (UTF-16-LE style "wide" strings).
    :param md5: The MD5 of the Sample to parse (fetched via get_file).
    :type md5: str
    :param data: The data to parse directly.
    :type data: str
    :returns: str -- a header, one string per line, then trailing newlines
    """
    if md5:
        data = get_file(md5)
    header = 'Unicode Strings\n' + "-" * 30 + "\n"
    unicode_regex = re.compile('(([%s]\x00){4,})' % string.printable)
    # findall yields (full_match, last_repetition) tuples; keep the full
    # match and strip the interleaved NULs.
    found = [m[0].replace('\x00', '') for m in unicode_regex.findall(data)]
    return header + '\n'.join(found) + "\n\n\n\n"
def make_stackstrings(md5=None, data=None):
    """
    Find and return all stack strings in a string.
    :param md5: The MD5 of the Sample to parse.
    :type md5: str
    :param data: The data to parse.
    :type data: str
    :returns: str
    """
    if md5:
        data = get_file(md5)
    # NOTE(review): the 0xC6 patterns below look like x86 "mov byte ptr
    # [...], imm8" (opcode C6) encodings -- 45/84 appear to be ebp-relative
    # modrm forms, 44 esp-relative, 05/85 absolute/disp32 -- with the
    # immediate byte collected when printable (0x20-0x7e) or TAB.  Confirm
    # against a disassembler before modifying the offsets.
    x = 0
    prev = 0
    strings = ''
    while x < len(data):
        if (data[x] == '\xc6') and ((data[x+1] == '\x45') or (data[x+1] == '\x84')):
            # C6 45/84 modrm disp8 imm8: immediate is the 4th byte.
            a = ord(data[x+3])
            if (a <= 126 and a >= 32) or (a==9): strings += data[x+3]
            prev = x
            x += 4
        elif (data[x] == '\xc6') and (data[x+1] == '\x44'):
            # C6 44 modrm sib disp8 imm8: immediate is the 5th byte.
            a = ord(data[x+4])
            if (a <= 126 and a >= 32) or (a==9): strings += data[x+4]
            prev = x
            x += 5
        elif (data[x] == '\xc6') and ((data[x+1] == '\x05') or (data[x+1] == '\x85')):
            # C6 05/85 modrm disp32 imm8: immediate is the 7th byte.
            a = ord(data[x+6])
            if (a <= 126 and a >= 32) or (a==9): strings += data[x+6]
            prev = x
            x += 7
        else:
            # A gap of 12 bytes since the last match ends the current string.
            if ((x - prev) ==12): strings += '\n'
            x += 1
    # NUL bytes collected from the immediates become line breaks.
    strings = strings.replace('\x00', '\r')
    return strings
def make_hex(md5=None, data=None):
    """
    Convert data into hex formatted output.
    Produces a classic hexdump: offset, hex column, then an ASCII column
    with non-printables shown as '.'.
    :param md5: The MD5 of the Sample to parse.
    :type md5: str
    :param data: The data to parse.
    :type data: str
    :returns: str
    """
    if md5:
        data = get_file(md5)
    # 16 input characters per output row.
    length = 16
    hex_data = ''
    # Python 2 only: `unicode` and `xrange` do not exist on Python 3.
    # Unicode input is rendered with four hex digits per character, bytes
    # with two.
    digits = 4 if isinstance(data, unicode) else 2
    for i in xrange(0, len(data), length):
        s = data[i:i+length]
        hexa = ' '.join(["%0*X" % (digits, ord(x)) for x in s])
        text = ' '.join([x if 0x20 <= ord(x) < 0x7F else '.' for x in s])
        # Offset, left-padded hex column, ASCII column.
        hex_data += "%04X %-*s %s\r\n" % (i, length*(digits + 1), hexa, text)
    return hex_data
def xor_string(md5=None, data=None, key=0, null=0):
    """
    XOR data against a single-byte key.
    :param md5: The MD5 of the Sample to parse.
    :type md5: str
    :param data: The data to parse.
    :type data: str
    :param key: The XOR key to use.
    :type key: int
    :param null: If 1, leave NUL bytes untouched and also skip bytes equal
                 to the key (which would otherwise XOR to NUL).
    :type null: int (0 or 1)
    :returns: str
    """
    if md5:
        data = get_file(md5)
    # Collect pieces and join once: O(n) instead of the quadratic
    # string-concatenation loop this replaces.
    out = []
    for c in data:
        o = ord(c)
        if null == 1 and (o == 0 or o == key):
            out.append(c)
        else:
            out.append(chr(o ^ key))
    return ''.join(out)
def xor_search(md5=None, data=None, string=None, skip_nulls=0):
    """
    Search a string for potential XOR keys. Uses a small list of common
    plaintext terms, XORs those terms using keys 0-255 and searches the data for
    any match. If there is a match, that key is included in the results.
    :param md5: The MD5 of the Sample to parse.
    :type md5: str
    :param data: The data to parse.
    :type data: str
    :param string: The custom string to XOR and search for.
    :type string: str
    :param skip_nulls: Whether or not to skip nulls.
    :type skip_nulls: int (0 or 1)
    :returns: sorted list of int keys (0 means the plaintext was found as-is)
    """
    if md5:
        data = get_file(md5)
    if string is None or string == '':
        plaintext_list = [
            'This program',
            'kernel32',
            'KERNEL32',
            'http',
            'svchost',
            'Microsoft',
            'PE for WIN32',
            'startxref',
            '!This program cannot be run in DOS mode',
            '\xD0\xCF\x11\xE0\xA1\xB1\x1a\xE1',
            'D\x00o\x00c\x00u\x00m\x00e\x00n\x00t\x00 \x00S\x00u\x00m\x00m\x00a\x00r\x00y\x00 \x00I\x00n\x00f\x00o\x00r\x00m\x00a\x00t\x00i\x00o\x00n',
        ]
    else:
        plaintext_list = ["%s" % string]
    # A set avoids the per-key list membership scan.
    results = set()
    for plaintext in plaintext_list:
        # BUG FIX: range(0, 255) stopped at key 254, silently skipping
        # 0xFF even though the docstring promises keys 0-255.
        for i in range(256):
            xord_string = xor_string(data=plaintext,
                                     key=i,
                                     null=skip_nulls)
            if xord_string in data:
                results.add(i)
    return sorted(results)
def make_list(s):
"""
Make a list of out a string of data that needs to be parsed using
:class:`csv.reader`.
:param s: The string to convert
:type s: str
:returns: list
"""
l = []
l.append(s)
a = csv.reader(l, skipinitialspace=True)
b = None
for i in a:
b = i
return b
def remove_html_tags(data):
"""
Remove html tags from a string.
:param data: The string to parse.
:type data: str
:returns: str
"""
p = re.compile(r'<.*?>')
return p.sub('', data)
def datestring_to_isodate(datestring):
"""
Parse a string using :class:`dateutil` and return the results.
:param datestring: The date string to parse.
:returns: datetime.datetime
"""
return parse(datestring, fuzzy=True)
def clean_dict(dict_, keys_to_remove):
"""
Remove keys we don't want to display to the user.
Can also be used to remove keys from user input that we want to manage
ourselves. In the latter case, be sure the query is using $set and not
completely replacing the document, otherwise keys added elsewhere might
be lost.
:param dict_: The dictionary to iterate over.
:type dict_: dict
:param keys_to_remove: The list of keys we want to remove.
:type keys_to_remove: list
"""
for key in keys_to_remove:
if key in dict_:
del dict_[key]
def json_handler(obj):
"""
Handles converting datetimes and Mongo ObjectIds to string.
Usage: json.dumps(..., default=json_handler)
:param obj: The object that needs converting.
:type obj: datetime.datetime, ObjectId
:returns: str
"""
if isinstance(obj, datetime.datetime):
return datetime.datetime.strftime(obj, settings.PY_DATETIME_FORMAT)
elif isinstance(obj, ObjectId):
return str(obj)
def generate_qrcode(data, size):
"""
Generate a QR Code Image from a string.
Will attempt to import qrcode (which also requires Pillow) and io. If
this fails we will return None.
:param data: data to be converted into a QR Code
:type data: str
:param size: tuple of (width, height) in pixels to resize the QR Code
:type size: tuple
:returns: str in base64 format
"""
try:
import qrcode, io
except:
return None
a = io.BytesIO()
qr = qrcode.QRCode()
qr.add_data(data)
img = qr.make_image().resize(size)
img.save(a, 'PNG')
qr_img = a.getvalue().encode('base64').replace('\n', '')
a.close()
return qr_img
def validate_md5_checksum(md5_checksum):
"""
Validates that string is truly an MD5.
:param md5_checksum: The string to validate.
:type md5_checksum: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
if re.match("^[a-fA-F0-9]{32}$", md5_checksum) == None:
retVal['message'] += "The MD5 digest needs to be 32 hex characters."
retVal['success'] = False
return retVal
def validate_sha1_checksum(sha1_checksum):
"""
Validates that string is truly a SHA1.
:param sha1_checksum: str
:return: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
if re.match("^[a-fA-F0-9]{40}$", sha1_checksum) == None:
retVal['message'] += "The SHA1 digest needs to be 40 hex characters."
retVal['success'] = False
return retVal
def validate_sha256_checksum(sha256_checksum):
"""
Validates that string is truly a SHA256.
:param sha256_checksum: The string to validate.
:type sha256_checksum: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
retVal = {'success': True, 'message': ''}
if re.match("^[a-fA-F0-9]{64}$", sha256_checksum) == None:
retVal['message'] += "The SHA256 digest needs to be 64 hex characters."
retVal['success'] = False
return retVal
def detect_pcap(data):
"""
Detect if the data has the magic numbers for a PCAP.
:param data: The data to inspect.
:type data: str
:returns: bool
"""
magic = ''.join(x.encode('hex') for x in data[:4])
if magic in (
'a1b2c3d4', #identical
'd4c3b2a1', #swapped
'4d3cb2a1',
'a1b23c4d', #nanosecond resolution
'0a0d0d0a', #pcap-ng
):
return True
else:
return False
| 0.003322 |
""" Cisco_IOS_XR_clns_isis_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR clns\-isis package configuration.
This module contains definitions
for the following management objects\:
isis\: IS\-IS configuration for all instances
This YANG module augments the
Cisco\-IOS\-XR\-snmp\-agent\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes import IsisAddressFamilyEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes import IsisInternalLevelEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes import IsisSubAddressFamilyEnum
class IsisAdjCheckEnum(Enum):
    """
    Isis adj check.

    .. data:: DISABLED = 0

        Disabled

    """

    DISABLED = 0

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisAdjCheckEnum']
class IsisAdvTypeExternalEnum(Enum):
    """
    Isis adv type external.

    .. data:: EXTERNAL = 1

        External

    """

    EXTERNAL = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisAdvTypeExternalEnum']
class IsisAdvTypeInterLevelEnum(Enum):
    """
    Isis adv type inter level.

    .. data:: INTER_LEVEL = 1

        InterLevel

    """

    INTER_LEVEL = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisAdvTypeInterLevelEnum']
class IsisApplyWeightEnum(Enum):
    """
    Isis apply weight.

    .. data:: ECMP_ONLY = 1

        Apply weight to ECMP prefixes

    .. data:: UCMP_ONLY = 2

        Apply weight to UCMP prefixes

    """

    ECMP_ONLY = 1
    UCMP_ONLY = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisApplyWeightEnum']
class IsisAttachedBitEnum(Enum):
    """
    Isis attached bit.

    .. data:: AREA = 0

        Computed from the attached areas

    .. data:: ON = 1

        Forced ON

    .. data:: OFF = 2

        Forced OFF

    """

    AREA = 0
    ON = 1
    OFF = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisAttachedBitEnum']
class IsisAuthenticationAlgorithmEnum(Enum):
    """
    Isis authentication algorithm.

    .. data:: CLEARTEXT = 1

        Cleartext password

    .. data:: HMAC_MD5 = 2

        HMAC-MD5 checksum

    .. data:: KEYCHAIN = 3

        Key Chain authentication

    """

    CLEARTEXT = 1
    HMAC_MD5 = 2
    KEYCHAIN = 3

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisAuthenticationAlgorithmEnum']
class IsisAuthenticationFailureModeEnum(Enum):
    """
    Isis authentication failure mode.

    .. data:: DROP = 0

        Drop non-authenticating PDUs

    .. data:: SEND_ONLY = 1

        Accept non-authenticating PDUs

    """

    DROP = 0
    SEND_ONLY = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisAuthenticationFailureModeEnum']
class IsisConfigurableLevelsEnum(Enum):
    """
    Isis configurable levels.

    .. data:: LEVEL1 = 1

        Level1

    .. data:: LEVEL2 = 2

        Level2

    .. data:: LEVEL1_AND2 = 3

        Both Levels

    """

    LEVEL1 = 1
    LEVEL2 = 2
    LEVEL1_AND2 = 3

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisConfigurableLevelsEnum']
class IsisHelloPaddingEnum(Enum):
    """
    Isis hello padding.

    .. data:: NEVER = 0

        Never pad Hellos

    .. data:: SOMETIMES = 1

        Pad Hellos during adjacency formation only

    """

    NEVER = 0
    SOMETIMES = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisHelloPaddingEnum']
class IsisInterfaceAfStateEnum(Enum):
    """
    Isis interface af state.

    .. data:: DISABLE = 0

        Disable

    """

    DISABLE = 0

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisInterfaceAfStateEnum']
class IsisInterfaceStateEnum(Enum):
    """
    Isis interface state.

    .. data:: SHUTDOWN = 0

        Shutdown

    .. data:: SUPPRESSED = 1

        Suppressed

    .. data:: PASSIVE = 2

        Passive

    """

    SHUTDOWN = 0
    SUPPRESSED = 1
    PASSIVE = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisInterfaceStateEnum']
class IsisLabelPreferenceEnum(Enum):
    """
    Isis label preference.

    .. data:: LDP = 0

        Label Distribution Protocol

    .. data:: SEGMENT_ROUTING = 1

        Segment Routing

    """

    LDP = 0
    SEGMENT_ROUTING = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisLabelPreferenceEnum']
class IsisMetricEnum(Enum):
    """
    Isis metric.

    .. data:: INTERNAL = 0

        Internal metric

    .. data:: EXTERNAL = 1

        External metric

    """

    INTERNAL = 0
    EXTERNAL = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMetricEnum']
class IsisMetricStyleEnum(Enum):
    """
    Isis metric style.

    .. data:: OLD_METRIC_STYLE = 0

        ISO 10589 metric style (old-style)

    .. data:: NEW_METRIC_STYLE = 1

        32-bit metric style (new-style)

    .. data:: BOTH_METRIC_STYLE = 2

        Both forms of metric style

    """

    OLD_METRIC_STYLE = 0
    NEW_METRIC_STYLE = 1
    BOTH_METRIC_STYLE = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMetricStyleEnum']
class IsisMetricStyleTransitionEnum(Enum):
    """
    Isis metric style transition.

    .. data:: DISABLED = 0

        Disabled

    .. data:: ENABLED = 1

        Enabled

    """

    DISABLED = 0
    ENABLED = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMetricStyleTransitionEnum']
class IsisMibAdjacencyChangeBooleanEnum(Enum):
    """
    Isis mib adjacency change boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 17

        Enable

    """

    FALSE = 0
    TRUE = 17

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibAdjacencyChangeBooleanEnum']
class IsisMibAllBooleanEnum(Enum):
    """
    Isis mib all boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 19

        Enable

    """

    FALSE = 0
    TRUE = 19

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibAllBooleanEnum']
class IsisMibAreaMismatchBooleanEnum(Enum):
    """
    Isis mib area mismatch boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 12

        Enable

    """

    FALSE = 0
    TRUE = 12

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibAreaMismatchBooleanEnum']
class IsisMibAttemptToExceedMaxSequenceBooleanEnum(Enum):
    """
    Isis mib attempt to exceed max sequence boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 4

        Enable

    """

    FALSE = 0
    TRUE = 4

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibAttemptToExceedMaxSequenceBooleanEnum']
class IsisMibAuthenticationFailureBooleanEnum(Enum):
    """
    Isis mib authentication failure boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 10

        Enable

    """

    FALSE = 0
    TRUE = 10

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibAuthenticationFailureBooleanEnum']
class IsisMibAuthenticationTypeFailureBooleanEnum(Enum):
    """
    Isis mib authentication type failure boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 9

        Enable

    """

    FALSE = 0
    TRUE = 9

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibAuthenticationTypeFailureBooleanEnum']
class IsisMibCorruptedLspDetectedBooleanEnum(Enum):
    """
    Isis mib corrupted lsp detected boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 3

        Enable

    """

    FALSE = 0
    TRUE = 3

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibCorruptedLspDetectedBooleanEnum']
class IsisMibDatabaseOverFlowBooleanEnum(Enum):
    """
    Isis mib database over flow boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 1

        Enable

    """

    FALSE = 0
    TRUE = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibDatabaseOverFlowBooleanEnum']
class IsisMibIdLengthMismatchBooleanEnum(Enum):
    """
    Isis mib id length mismatch boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 5

        Enable

    """

    FALSE = 0
    TRUE = 5

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibIdLengthMismatchBooleanEnum']
class IsisMibLspErrorDetectedBooleanEnum(Enum):
    """
    Isis mib lsp error detected boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 18

        Enable

    """

    FALSE = 0
    TRUE = 18

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibLspErrorDetectedBooleanEnum']
class IsisMibLspTooLargeToPropagateBooleanEnum(Enum):
    """
    Isis mib lsp too large to propagate boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 14

        Enable

    """

    FALSE = 0
    TRUE = 14

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibLspTooLargeToPropagateBooleanEnum']
class IsisMibManualAddressDropsBooleanEnum(Enum):
    """
    Isis mib manual address drops boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 2

        Enable

    """

    FALSE = 0
    TRUE = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibManualAddressDropsBooleanEnum']
class IsisMibMaxAreaAddressMismatchBooleanEnum(Enum):
    """
    Isis mib max area address mismatch boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 6

        Enable

    """

    FALSE = 0
    TRUE = 6

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibMaxAreaAddressMismatchBooleanEnum']
class IsisMibOriginatedLspBufferSizeMismatchBooleanEnum(Enum):
    """
    Isis mib originated lsp buffer size mismatch boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 15

        Enable

    """

    FALSE = 0
    TRUE = 15

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibOriginatedLspBufferSizeMismatchBooleanEnum']
class IsisMibOwnLspPurgeBooleanEnum(Enum):
    """
    Isis mib own lsp purge boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 7

        Enable

    """

    FALSE = 0
    TRUE = 7

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibOwnLspPurgeBooleanEnum']
class IsisMibProtocolsSupportedMismatchBooleanEnum(Enum):
    """
    Isis mib protocols supported mismatch boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 16

        Enable

    """

    FALSE = 0
    TRUE = 16

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibProtocolsSupportedMismatchBooleanEnum']
class IsisMibRejectedAdjacencyBooleanEnum(Enum):
    """
    Isis mib rejected adjacency boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 13

        Enable

    """

    FALSE = 0
    TRUE = 13

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibRejectedAdjacencyBooleanEnum']
class IsisMibSequenceNumberSkipBooleanEnum(Enum):
    """
    Isis mib sequence number skip boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 8

        Enable

    """

    FALSE = 0
    TRUE = 8

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibSequenceNumberSkipBooleanEnum']
class IsisMibVersionSkewBooleanEnum(Enum):
    """
    Isis mib version skew boolean.

    .. data:: FALSE = 0

        Disable

    .. data:: TRUE = 11

        Enable

    """

    FALSE = 0
    TRUE = 11

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMibVersionSkewBooleanEnum']
class IsisMicroLoopAvoidanceEnum(Enum):
    """
    Isis micro loop avoidance.

    .. data:: NOT_SET = 0

        No Avoidance type set

    .. data:: MICRO_LOOP_AVOIDANCE_ALL = 1

        Provide micro loop avoidance for all prefixes

    .. data:: MICRO_LOOP_AVOIDANCE_PROTECTED = 2

        Provide micro loop avoidance only for protected prefixes

    """

    NOT_SET = 0
    MICRO_LOOP_AVOIDANCE_ALL = 1
    MICRO_LOOP_AVOIDANCE_PROTECTED = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisMicroLoopAvoidanceEnum']
class IsisNsfFlavorEnum(Enum):
    """
    Isis nsf flavor.

    .. data:: CISCO_PROPRIETARY_NSF = 1

        Cisco proprietary NSF

    .. data:: IETF_STANDARD_NSF = 2

        IETF standard NSF

    """

    CISCO_PROPRIETARY_NSF = 1
    IETF_STANDARD_NSF = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisNsfFlavorEnum']
class IsisOverloadBitModeEnum(Enum):
    """
    Isis overload bit mode.

    .. data:: PERMANENTLY_SET = 1

        Set always

    .. data:: STARTUP_PERIOD = 2

        Set during the startup period

    .. data:: WAIT_FOR_BGP = 3

        Set until BGP converges

    """

    PERMANENTLY_SET = 1
    STARTUP_PERIOD = 2
    WAIT_FOR_BGP = 3

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisOverloadBitModeEnum']
class IsisPrefixPriorityEnum(Enum):
    """
    Isis prefix priority.

    .. data:: CRITICAL_PRIORITY = 0

        Critical prefix priority

    .. data:: HIGH_PRIORITY = 1

        High prefix priority

    .. data:: MEDIUM_PRIORITY = 2

        Medium prefix priority

    """

    CRITICAL_PRIORITY = 0
    HIGH_PRIORITY = 1
    MEDIUM_PRIORITY = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisPrefixPriorityEnum']
class IsisRedistProtoEnum(Enum):
    """
    Isis redist proto.

    .. data:: CONNECTED = 0

        Connected

    .. data:: STATIC = 1

        Static

    .. data:: OSPF = 2

        OSPF

    .. data:: BGP = 3

        BGP

    .. data:: ISIS = 4

        ISIS

    .. data:: OSPFV3 = 5

        OSPFv3

    .. data:: RIP = 6

        RIP

    .. data:: EIGRP = 7

        EIGRP

    .. data:: SUBSCRIBER = 8

        Subscriber

    .. data:: APPLICATION = 9

        Application

    .. data:: MOBILE = 10

        Mobile

    """

    CONNECTED = 0
    STATIC = 1
    OSPF = 2
    BGP = 3
    ISIS = 4
    OSPFV3 = 5
    RIP = 6
    EIGRP = 7
    SUBSCRIBER = 8
    APPLICATION = 9
    MOBILE = 10

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisRedistProtoEnum']
class IsisRemoteLfaEnum(Enum):
    """
    Isis remote lfa.

    .. data:: REMOTE_LFA_NONE = 0

        No remote LFA option set

    .. data:: REMOTE_LFA_TUNNEL_LDP = 1

        Construct remote LFA tunnel using MPLS LDP

    """

    REMOTE_LFA_NONE = 0
    REMOTE_LFA_TUNNEL_LDP = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisRemoteLfaEnum']
class IsisSnpAuthEnum(Enum):
    """
    Isis snp auth.

    .. data:: SEND_ONLY = 0

        Authenticate SNP send only

    .. data:: FULL = 1

        Authenticate SNP send and recv

    """

    SEND_ONLY = 0
    FULL = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisSnpAuthEnum']
class IsisexplicitNullFlagEnum(Enum):
    """
    Isisexplicit null flag.

    .. data:: DISABLE = 0

        Disable EXPLICITNULL

    .. data:: ENABLE = 1

        Enable EXPLICITNULL

    """

    DISABLE = 0
    ENABLE = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisexplicitNullFlagEnum']
class IsisfrrEnum(Enum):
    """
    Isisfrr.

    .. data:: PER_LINK = 1

        Prefix independent per-link computation

    .. data:: PER_PREFIX = 2

        Prefix dependent computation

    """

    PER_LINK = 1
    PER_PREFIX = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisfrrEnum']
class IsisfrrLoadSharingEnum(Enum):
    """
    Isisfrr load sharing.

    .. data:: DISABLE = 1

        Disable load sharing of prefixes across multiple backups

    """

    DISABLE = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisfrrLoadSharingEnum']
class IsisfrrTiebreakerEnum(Enum):
    """
    Isisfrr tiebreaker.

    .. data:: DOWNSTREAM = 0

        Prefer backup path via downstream node

    .. data:: LC_DISJOINT = 1

        Prefer line card disjoint backup path

    .. data:: LOWEST_BACKUP_METRIC = 2

        Prefer backup path with lowest total metric

    .. data:: NODE_PROTECTING = 3

        Prefer node protecting backup path

    .. data:: PRIMARY_PATH = 4

        Prefer backup path from ECMP set

    .. data:: SECONDARY_PATH = 5

        Prefer non-ECMP backup path

    .. data:: SRLG_DISJOINT = 6

        Prefer SRLG disjoint backup path

    """

    DOWNSTREAM = 0
    LC_DISJOINT = 1
    LOWEST_BACKUP_METRIC = 2
    NODE_PROTECTING = 3
    PRIMARY_PATH = 4
    SECONDARY_PATH = 5
    SRLG_DISJOINT = 6

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisfrrTiebreakerEnum']
class IsisispfStateEnum(Enum):
    """
    Isisispf state.

    .. data:: ENABLED = 1

        Enabled

    """

    ENABLED = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisispfStateEnum']
class IsisphpFlagEnum(Enum):
    """
    Isisphp flag.

    .. data:: ENABLE = 0

        Enable PHP

    .. data:: DISABLE = 1

        Disable PHP

    """

    ENABLE = 0
    DISABLE = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsisphpFlagEnum']
class IsissidEnum(Enum):
    """
    Isissid.

    .. data:: INDEX = 1

        SID as an index

    .. data:: ABSOLUTE = 2

        SID as an absolute label

    """

    INDEX = 1
    ABSOLUTE = 2

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['IsissidEnum']
class NflagClearEnum(Enum):
    """
    Nflag clear.

    .. data:: DISABLE = 0

        Disable N-flag-clear

    .. data:: ENABLE = 1

        Enable N-flag-clear

    """

    DISABLE = 0
    ENABLE = 1

    @staticmethod
    def _meta_info():
        # Deferred import: the meta tables live in a separate generated module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_tables
        return meta_tables._meta_table['NflagClearEnum']
class Isis(object):
"""
IS\-IS configuration for all instances
.. attribute:: instances
IS\-IS instance configuration
**type**\: :py:class:`Instances <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.instances = Isis.Instances()
self.instances.parent = self
class Instances(object):
"""
IS\-IS instance configuration
.. attribute:: instance
Configuration for a single IS\-IS instance
**type**\: list of :py:class:`Instance <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.instance = YList()
self.instance.parent = self
self.instance.name = 'instance'
class Instance(object):
"""
Configuration for a single IS\-IS instance
.. attribute:: instance_name <key>
Instance identifier
**type**\: str
**range:** 0..40
.. attribute:: afs
Per\-address\-family configuration
**type**\: :py:class:`Afs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs>`
.. attribute:: distribute
IS\-IS Distribute BGP\-LS configuration
**type**\: :py:class:`Distribute <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Distribute>`
.. attribute:: dynamic_host_name
If TRUE, dynamic hostname resolution is disabled, and system IDs will always be displayed by show and debug output
**type**\: bool
.. attribute:: ignore_lsp_errors
If TRUE, LSPs recieved with bad checksums will result in the purging of that LSP from the LSP DB. If FALSE or not set, the received LSP will just be ignored
**type**\: bool
.. attribute:: interfaces
Per\-interface configuration
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces>`
.. attribute:: is_type
IS type of the IS\-IS process
**type**\: :py:class:`IsisConfigurableLevelsEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisConfigurableLevelsEnum>`
.. attribute:: link_groups
Link Group
**type**\: :py:class:`LinkGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LinkGroups>`
.. attribute:: log_adjacency_changes
Log changes in adjacency state
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: log_pdu_drops
Log PDU drops
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: lsp_accept_passwords
LSP/SNP accept password configuration
**type**\: :py:class:`LspAcceptPasswords <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspAcceptPasswords>`
.. attribute:: lsp_arrival_times
LSP arrival time configuration
**type**\: :py:class:`LspArrivalTimes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspArrivalTimes>`
.. attribute:: lsp_check_intervals
LSP checksum check interval configuration
**type**\: :py:class:`LspCheckIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspCheckIntervals>`
.. attribute:: lsp_generation_intervals
LSP generation\-interval configuration
**type**\: :py:class:`LspGenerationIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspGenerationIntervals>`
.. attribute:: lsp_lifetimes
LSP lifetime configuration
**type**\: :py:class:`LspLifetimes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspLifetimes>`
.. attribute:: lsp_mtus
LSP MTU configuration
**type**\: :py:class:`LspMtus <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspMtus>`
.. attribute:: lsp_passwords
LSP/SNP password configuration
**type**\: :py:class:`LspPasswords <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspPasswords>`
.. attribute:: lsp_refresh_intervals
LSP refresh\-interval configuration
**type**\: :py:class:`LspRefreshIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspRefreshIntervals>`
.. attribute:: max_link_metrics
Max Link Metric configuration
**type**\: :py:class:`MaxLinkMetrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.MaxLinkMetrics>`
.. attribute:: nets
NET configuration
**type**\: :py:class:`Nets <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Nets>`
.. attribute:: nsf
IS\-IS NSF configuration
**type**\: :py:class:`Nsf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Nsf>`
.. attribute:: nsr
IS\-IS NSR configuration
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: overload_bits
LSP overload\-bit configuration
**type**\: :py:class:`OverloadBits <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.OverloadBits>`
.. attribute:: running
Flag to indicate that instance should be running. This must be the first object created when an IS\-IS instance is configured, and the last object deleted when it is deconfigured. When this object is deleted, the IS\-IS instance will exit
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: srgb
Segment Routing Global Block configuration
**type**\: :py:class:`Srgb <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Srgb>`
.. attribute:: trace_buffer_size
Trace buffer size configuration
**type**\: :py:class:`TraceBufferSize <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.TraceBufferSize>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.instance_name = None
self.afs = Isis.Instances.Instance.Afs()
self.afs.parent = self
self.distribute = None
self.dynamic_host_name = None
self.ignore_lsp_errors = None
self.interfaces = Isis.Instances.Instance.Interfaces()
self.interfaces.parent = self
self.is_type = None
self.link_groups = Isis.Instances.Instance.LinkGroups()
self.link_groups.parent = self
self.log_adjacency_changes = None
self.log_pdu_drops = None
self.lsp_accept_passwords = Isis.Instances.Instance.LspAcceptPasswords()
self.lsp_accept_passwords.parent = self
self.lsp_arrival_times = Isis.Instances.Instance.LspArrivalTimes()
self.lsp_arrival_times.parent = self
self.lsp_check_intervals = Isis.Instances.Instance.LspCheckIntervals()
self.lsp_check_intervals.parent = self
self.lsp_generation_intervals = Isis.Instances.Instance.LspGenerationIntervals()
self.lsp_generation_intervals.parent = self
self.lsp_lifetimes = Isis.Instances.Instance.LspLifetimes()
self.lsp_lifetimes.parent = self
self.lsp_mtus = Isis.Instances.Instance.LspMtus()
self.lsp_mtus.parent = self
self.lsp_passwords = Isis.Instances.Instance.LspPasswords()
self.lsp_passwords.parent = self
self.lsp_refresh_intervals = Isis.Instances.Instance.LspRefreshIntervals()
self.lsp_refresh_intervals.parent = self
self.max_link_metrics = Isis.Instances.Instance.MaxLinkMetrics()
self.max_link_metrics.parent = self
self.nets = Isis.Instances.Instance.Nets()
self.nets.parent = self
self.nsf = Isis.Instances.Instance.Nsf()
self.nsf.parent = self
self.nsr = None
self.overload_bits = Isis.Instances.Instance.OverloadBits()
self.overload_bits.parent = self
self.running = None
self.srgb = None
self.trace_buffer_size = Isis.Instances.Instance.TraceBufferSize()
self.trace_buffer_size.parent = self
class Srgb(object):
    """
    Segment Routing Global Block (SRGB) configuration.

    This is a presence container: creating it is meaningful on its own,
    so ``_has_data`` reports True even before any leaf is populated.

    .. attribute:: lower_bound

    	The lower bound of the SRGB

    	**type**\: int, **range:** 16000..1048574, **mandatory**\: True

    .. attribute:: upper_bound

    	The upper bound of the SRGB

    	**type**\: int, **range:** 16001..1048575, **mandatory**\: True
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Presence container: existence alone constitutes configuration data.
        self._is_presence = True
        self.lower_bound = None
        self.upper_bound = None

    @property
    def _common_path(self):
        # The XPath for this node is derived from the parent's path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:srgb'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        # A presence container always has data once instantiated.
        if self._is_presence:
            return True
        return any(leaf is not None for leaf in (self.lower_bound, self.upper_bound))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Srgb']['meta_info']
class LspGenerationIntervals(object):
    """
    LSP generation-interval configuration.

    Container holding a list of per-level LSP generation scheduling
    parameter entries, keyed by ISIS level.

    .. attribute:: lsp_generation_interval

    	LSP generation scheduling parameters

    	**type**\: list of :py:class:`LspGenerationInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspGenerationIntervals.LspGenerationInterval>`
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.lsp_generation_interval = YList()
        self.lsp_generation_interval.name = 'lsp_generation_interval'
        self.lsp_generation_interval.parent = self

    class LspGenerationInterval(object):
        """
        LSP generation scheduling parameters for one ISIS level.

        .. attribute:: level  <key>

        	Level to which configuration applies

        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: initial_wait

        	Initial wait before generating local LSP, milliseconds (0..120000)

        .. attribute:: maximum_wait

        	Maximum wait before generating local LSP, milliseconds (0..120000)

        .. attribute:: secondary_wait

        	Secondary wait before generating local LSP, milliseconds (0..120000)
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.initial_wait = None
            self.maximum_wait = None
            self.secondary_wait = None

        @property
        def _common_path(self):
            # Keyed list entry: both the parent path and the key must be set.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return ('%s/Cisco-IOS-XR-clns-isis-cfg:lsp-generation-interval'
                    '[Cisco-IOS-XR-clns-isis-cfg:level = %s]'
                    % (self.parent._common_path, self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.level, self.initial_wait,
                      self.maximum_wait, self.secondary_wait)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LspGenerationIntervals.LspGenerationInterval']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-generation-intervals'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # The container has data only when at least one child entry does.
        if not self.is_config():
            return False
        if self.lsp_generation_interval is None:
            return False
        return any(child._has_data() for child in self.lsp_generation_interval)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LspGenerationIntervals']['meta_info']
class LspArrivalTimes(object):
    """
    LSP arrival time configuration.

    Container holding a list of minimum-LSP-arrival-time entries,
    keyed by ISIS level.

    .. attribute:: lsp_arrival_time

    	Minimum LSP arrival time

    	**type**\: list of :py:class:`LspArrivalTime <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.LspArrivalTimes.LspArrivalTime>`
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.lsp_arrival_time = YList()
        self.lsp_arrival_time.name = 'lsp_arrival_time'
        self.lsp_arrival_time.parent = self

    class LspArrivalTime(object):
        """
        Minimum LSP arrival time for one ISIS level.

        .. attribute:: level  <key>

        	Level to which configuration applies

        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: initial_wait

        	Initial delay expected to take since last LSP, milliseconds (0..120000)

        .. attribute:: maximum_wait

        	Maximum delay expected to take since last LSP, milliseconds (0..120000)

        .. attribute:: secondary_wait

        	Secondary delay expected to take since last LSP, milliseconds (0..120000)
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.initial_wait = None
            self.maximum_wait = None
            self.secondary_wait = None

        @property
        def _common_path(self):
            # Keyed list entry: both the parent path and the key must be set.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return ('%s/Cisco-IOS-XR-clns-isis-cfg:lsp-arrival-time'
                    '[Cisco-IOS-XR-clns-isis-cfg:level = %s]'
                    % (self.parent._common_path, self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.level, self.initial_wait,
                      self.maximum_wait, self.secondary_wait)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LspArrivalTimes.LspArrivalTime']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-arrival-times'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # The container has data only when at least one child entry does.
        if not self.is_config():
            return False
        if self.lsp_arrival_time is None:
            return False
        return any(child._has_data() for child in self.lsp_arrival_time)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LspArrivalTimes']['meta_info']
class TraceBufferSize(object):
    """
    Trace buffer size configuration.

    .. attribute:: detailed

    	Buffer size for detailed traces

    	**type**\: int, **range:** 1..1000000

    .. attribute:: severe

    	Buffer size for severe trace

    	**type**\: int, **range:** 1..1000000

    .. attribute:: standard

    	Buffer size for standard traces

    	**type**\: int, **range:** 1..1000000
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.detailed = None
        self.severe = None
        self.standard = None

    @property
    def _common_path(self):
        # The XPath for this node is derived from the parent's path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:trace-buffer-size'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        return any(leaf is not None
                   for leaf in (self.detailed, self.severe, self.standard))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.TraceBufferSize']['meta_info']
class MaxLinkMetrics(object):
    """
    Max Link Metric configuration.

    Container holding per-level Max Link Metric entries, keyed by
    ISIS level; an entry carries no leaves beyond its key.

    .. attribute:: max_link_metric

    	Max Link Metric

    	**type**\: list of :py:class:`MaxLinkMetric <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.MaxLinkMetrics.MaxLinkMetric>`
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.max_link_metric = YList()
        self.max_link_metric.name = 'max_link_metric'
        self.max_link_metric.parent = self

    class MaxLinkMetric(object):
        """
        Max Link Metric entry for one ISIS level.

        .. attribute:: level  <key>

        	Level to which configuration applies

        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None

        @property
        def _common_path(self):
            # Keyed list entry: both the parent path and the key must be set.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return ('%s/Cisco-IOS-XR-clns-isis-cfg:max-link-metric'
                    '[Cisco-IOS-XR-clns-isis-cfg:level = %s]'
                    % (self.parent._common_path, self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return self.level is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.MaxLinkMetrics.MaxLinkMetric']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:max-link-metrics'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # The container has data only when at least one child entry does.
        if not self.is_config():
            return False
        if self.max_link_metric is None:
            return False
        return any(child._has_data() for child in self.max_link_metric)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.MaxLinkMetrics']['meta_info']
class Afs(object):
"""
Per\-address\-family configuration
.. attribute:: af
Configuration for an IS\-IS address\-family. If a named (non\-default) topology is being created it must be multicast
**type**\: list of :py:class:`Af <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af = YList()
self.af.parent = self
self.af.name = 'af'
class Af(object):
"""
Configuration for an IS\-IS address\-family. If
a named (non\-default) topology is being
created it must be multicast.
.. attribute:: af_name <key>
Address family
**type**\: :py:class:`IsisAddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisAddressFamilyEnum>`
.. attribute:: saf_name <key>
Sub address family
**type**\: :py:class:`IsisSubAddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisSubAddressFamilyEnum>`
.. attribute:: af_data
Data container
**type**\: :py:class:`AfData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData>`
.. attribute:: topology_name
keys\: topology\-name
**type**\: list of :py:class:`TopologyName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.af_name = None
self.saf_name = None
self.af_data = None
self.topology_name = YList()
self.topology_name.parent = self
self.topology_name.name = 'topology_name'
class AfData(object):
"""
Data container.
.. attribute:: adjacency_check
Suppress check for consistent AF support on received IIHs
**type**\: :py:class:`IsisAdjCheckEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisAdjCheckEnum>`
.. attribute:: admin_distances
Per\-route administrative distanceconfiguration
**type**\: :py:class:`AdminDistances <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.AdminDistances>`
.. attribute:: advertise_passive_only
If enabled, advertise prefixes of passive interfaces only
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: apply_weight
Apply weights to UCMP or ECMP only
**type**\: :py:class:`IsisApplyWeightEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisApplyWeightEnum>`
.. attribute:: attached_bit
Set the attached bit in this router's level 1 System LSP
**type**\: :py:class:`IsisAttachedBitEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisAttachedBitEnum>`
.. attribute:: default_admin_distance
Default IS\-IS administrative distance configuration
**type**\: int
**range:** 1..255
.. attribute:: default_information
Control origination of a default route with the option of using a policy. If no policy is specified the default route is advertised with zero cost in level 2 only
**type**\: :py:class:`DefaultInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.DefaultInformation>`
.. attribute:: frr_table
Fast\-ReRoute configuration
**type**\: :py:class:`FrrTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.FrrTable>`
.. attribute:: ignore_attached_bit
If TRUE, Ignore other routers attached bit
**type**\: bool
.. attribute:: ispf
ISPF configuration
**type**\: :py:class:`Ispf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Ispf>`
.. attribute:: max_redist_prefixes
Maximum number of redistributed prefixesconfiguration
**type**\: :py:class:`MaxRedistPrefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes>`
.. attribute:: maximum_paths
Maximum number of active parallel paths per route
**type**\: int
**range:** 1..64
.. attribute:: metric_styles
Metric\-style configuration
**type**\: :py:class:`MetricStyles <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.MetricStyles>`
.. attribute:: metrics
Metric configuration
**type**\: :py:class:`Metrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Metrics>`
.. attribute:: micro_loop_avoidance
Micro Loop Avoidance configuration
**type**\: :py:class:`MicroLoopAvoidance <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.MicroLoopAvoidance>`
.. attribute:: monitor_convergence
Enable convergence monitoring
**type**\: :py:class:`MonitorConvergence <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.MonitorConvergence>`
.. attribute:: mpls
MPLS configuration. MPLS configuration will only be applied for the IPv4\-unicast address\-family
**type**\: :py:class:`Mpls <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Mpls>`
.. attribute:: mpls_ldp_global
MPLS LDP configuration. MPLS LDP configuration will only be applied for the IPv4\-unicast address\-family
**type**\: :py:class:`MplsLdpGlobal <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.MplsLdpGlobal>`
.. attribute:: propagations
Route propagation configuration
**type**\: :py:class:`Propagations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Propagations>`
.. attribute:: redistributions
Protocol redistribution configuration
**type**\: :py:class:`Redistributions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Redistributions>`
.. attribute:: route_source_first_hop
If TRUE, routes will be installed with the IP address of the first\-hop node as the source instead of the originating node
**type**\: bool
.. attribute:: segment_routing
Enable Segment Routing configuration
**type**\: :py:class:`SegmentRouting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting>`
.. attribute:: single_topology
Run IPv6 Unicast using the standard (IPv4 Unicast) topology
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: spf_intervals
SPF\-interval configuration
**type**\: :py:class:`SpfIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals>`
.. attribute:: spf_periodic_intervals
Peoridic SPF configuration
**type**\: :py:class:`SpfPeriodicIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals>`
.. attribute:: spf_prefix_priorities
SPF Prefix Priority configuration
**type**\: :py:class:`SpfPrefixPriorities <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities>`
.. attribute:: summary_prefixes
Summary\-prefix configuration
**type**\: :py:class:`SummaryPrefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes>`
.. attribute:: topology_id
Set the topology ID for a named (non\-default) topology. This object must be set before any other configuration is supplied for a named (non\-default) topology , and must be the last configuration object to be removed. This item should not be supplied for the non\-named default topologies
**type**\: int
**range:** 6..4095
.. attribute:: ucmp
UCMP (UnEqual Cost MultiPath) configuration
**type**\: :py:class:`Ucmp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Ucmp>`
.. attribute:: weights
Weight configuration
**type**\: :py:class:`Weights <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Weights>`
.. attribute:: _is_presence
Is present if this instance represents presence container else not
**type**\: bool
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self._is_presence = True
self.adjacency_check = None
self.admin_distances = Isis.Instances.Instance.Afs.Af.AfData.AdminDistances()
self.admin_distances.parent = self
self.advertise_passive_only = None
self.apply_weight = None
self.attached_bit = None
self.default_admin_distance = None
self.default_information = Isis.Instances.Instance.Afs.Af.AfData.DefaultInformation()
self.default_information.parent = self
self.frr_table = Isis.Instances.Instance.Afs.Af.AfData.FrrTable()
self.frr_table.parent = self
self.ignore_attached_bit = None
self.ispf = Isis.Instances.Instance.Afs.Af.AfData.Ispf()
self.ispf.parent = self
self.max_redist_prefixes = Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes()
self.max_redist_prefixes.parent = self
self.maximum_paths = None
self.metric_styles = Isis.Instances.Instance.Afs.Af.AfData.MetricStyles()
self.metric_styles.parent = self
self.metrics = Isis.Instances.Instance.Afs.Af.AfData.Metrics()
self.metrics.parent = self
self.micro_loop_avoidance = Isis.Instances.Instance.Afs.Af.AfData.MicroLoopAvoidance()
self.micro_loop_avoidance.parent = self
self.monitor_convergence = Isis.Instances.Instance.Afs.Af.AfData.MonitorConvergence()
self.monitor_convergence.parent = self
self.mpls = Isis.Instances.Instance.Afs.Af.AfData.Mpls()
self.mpls.parent = self
self.mpls_ldp_global = Isis.Instances.Instance.Afs.Af.AfData.MplsLdpGlobal()
self.mpls_ldp_global.parent = self
self.propagations = Isis.Instances.Instance.Afs.Af.AfData.Propagations()
self.propagations.parent = self
self.redistributions = Isis.Instances.Instance.Afs.Af.AfData.Redistributions()
self.redistributions.parent = self
self.route_source_first_hop = None
self.segment_routing = Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting()
self.segment_routing.parent = self
self.single_topology = None
self.spf_intervals = Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals()
self.spf_intervals.parent = self
self.spf_periodic_intervals = Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals()
self.spf_periodic_intervals.parent = self
self.spf_prefix_priorities = Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities()
self.spf_prefix_priorities.parent = self
self.summary_prefixes = Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes()
self.summary_prefixes.parent = self
self.topology_id = None
self.ucmp = Isis.Instances.Instance.Afs.Af.AfData.Ucmp()
self.ucmp.parent = self
self.weights = Isis.Instances.Instance.Afs.Af.AfData.Weights()
self.weights.parent = self
class SegmentRouting(object):
    """
    Enable Segment Routing configuration.

    .. attribute:: mpls

    	Prefer segment routing labels over LDP labels

    	**type**\: :py:class:`IsisLabelPreferenceEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisLabelPreferenceEnum>`

    .. attribute:: prefix_sid_map

    	Enable Segment Routing prefix SID map configuration

    	**type**\: :py:class:`PrefixSidMap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting.PrefixSidMap>`
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.mpls = None
        self.prefix_sid_map = Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting.PrefixSidMap()
        self.prefix_sid_map.parent = self

    class PrefixSidMap(object):
        """
        Enable Segment Routing prefix SID map configuration.

        .. attribute:: advertise_local

        	Enable Segment Routing prefix SID map advertise local

        	**type**\: :py:class:`Empty <ydk.types.Empty>`

        .. attribute:: receive

        	If TRUE, remote prefix SID map advertisements will be used.
        	If FALSE, they will not be used

        	**type**\: bool
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.advertise_local = None
            self.receive = None

        @property
        def _common_path(self):
            # The XPath for this node is derived from the parent's path.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:prefix-sid-map'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None
                       for leaf in (self.advertise_local, self.receive))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting.PrefixSidMap']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:segment-routing'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.mpls is not None:
            return True
        # Delegate to the child container's own data check.
        psm = self.prefix_sid_map
        return psm is not None and psm._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SegmentRouting']['meta_info']
class MetricStyles(object):
    """
    Metric-style configuration.

    Container holding per-level metric-style entries, keyed by
    ISIS level.

    .. attribute:: metric_style

    	Configuration of metric style in LSPs

    	**type**\: list of :py:class:`MetricStyle <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.MetricStyles.MetricStyle>`
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.metric_style = YList()
        self.metric_style.name = 'metric_style'
        self.metric_style.parent = self

    class MetricStyle(object):
        """
        Configuration of metric style in LSPs for one ISIS level.

        .. attribute:: level  <key>

        	Level to which configuration applies

        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: style

        	Metric Style

        	**type**\: :py:class:`IsisMetricStyleEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisMetricStyleEnum>`

        .. attribute:: transition_state

        	Transition state

        	**type**\: :py:class:`IsisMetricStyleTransitionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisMetricStyleTransitionEnum>`
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.style = None
            self.transition_state = None

        @property
        def _common_path(self):
            # Keyed list entry: both the parent path and the key must be set.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return ('%s/Cisco-IOS-XR-clns-isis-cfg:metric-style'
                    '[Cisco-IOS-XR-clns-isis-cfg:level = %s]'
                    % (self.parent._common_path, self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.level, self.style, self.transition_state)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.AfData.MetricStyles.MetricStyle']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:metric-styles'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # The container has data only when at least one child entry does.
        if not self.is_config():
            return False
        if self.metric_style is None:
            return False
        return any(child._has_data() for child in self.metric_style)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.AfData.MetricStyles']['meta_info']
class FrrTable(object):
"""
Fast\-ReRoute configuration
.. attribute:: frr_load_sharings
Load share prefixes across multiple backups
**type**\: :py:class:`FrrLoadSharings <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings>`
.. attribute:: frr_remote_lfa_prefixes
FRR remote LFA prefix list filter configuration
**type**\: :py:class:`FrrRemoteLfaPrefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes>`
.. attribute:: frr_tiebreakers
FRR tiebreakers configuration
**type**\: :py:class:`FrrTiebreakers <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers>`
.. attribute:: frr_use_cand_onlies
FRR use candidate only configuration
**type**\: :py:class:`FrrUseCandOnlies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies>`
.. attribute:: priority_limits
FRR prefix\-limit configuration
**type**\: :py:class:`PriorityLimits <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.frr_load_sharings = Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings()
self.frr_load_sharings.parent = self
self.frr_remote_lfa_prefixes = Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes()
self.frr_remote_lfa_prefixes.parent = self
self.frr_tiebreakers = Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers()
self.frr_tiebreakers.parent = self
self.frr_use_cand_onlies = Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies()
self.frr_use_cand_onlies.parent = self
self.priority_limits = Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits()
self.priority_limits.parent = self
class FrrLoadSharings(object):
    """
    Load share prefixes across multiple backups.

    Container holding per-level FRR load-sharing entries, keyed by
    ISIS level.

    .. attribute:: frr_load_sharing

    	Disable load sharing

    	**type**\: list of :py:class:`FrrLoadSharing <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings.FrrLoadSharing>`
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.frr_load_sharing = YList()
        self.frr_load_sharing.name = 'frr_load_sharing'
        self.frr_load_sharing.parent = self

    class FrrLoadSharing(object):
        """
        Disable load sharing for one ISIS level.

        .. attribute:: level  <key>

        	Level to which configuration applies

        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: load_sharing

        	Load sharing

        	**type**\: :py:class:`IsisfrrLoadSharingEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisfrrLoadSharingEnum>`

        	**mandatory**\: True
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.load_sharing = None

        @property
        def _common_path(self):
            # Keyed list entry: both the parent path and the key must be set.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return ('%s/Cisco-IOS-XR-clns-isis-cfg:frr-load-sharing'
                    '[Cisco-IOS-XR-clns-isis-cfg:level = %s]'
                    % (self.parent._common_path, self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None
                       for leaf in (self.level, self.load_sharing))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings.FrrLoadSharing']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-load-sharings'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # The container has data only when at least one child entry does.
        if not self.is_config():
            return False
        if self.frr_load_sharing is None:
            return False
        return any(child._has_data() for child in self.frr_load_sharing)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrLoadSharings']['meta_info']
class PriorityLimits(object):
    """
    FRR prefix-limit configuration.

    Container holding entries that limit backup computation up to a
    prefix priority, keyed by computation type and ISIS level.

    .. attribute:: priority_limit

    	Limit backup computation upto the prefix priority

    	**type**\: list of :py:class:`PriorityLimit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits.PriorityLimit>`
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.priority_limit = YList()
        self.priority_limit.name = 'priority_limit'
        self.priority_limit.parent = self

    class PriorityLimit(object):
        """
        Limit backup computation up to the prefix priority.

        .. attribute:: frr_type  <key>

        	Computation Type

        	**type**\: :py:class:`IsisfrrEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisfrrEnum>`

        .. attribute:: level  <key>

        	Level to which configuration applies

        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: priority

        	Compute for all prefixes upto the specified priority

        	**type**\: :py:class:`IsisPrefixPriorityEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisPrefixPriorityEnum>`

        	**mandatory**\: True
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.frr_type = None
            self.level = None
            self.priority = None

        @property
        def _common_path(self):
            # Doubly-keyed list entry: parent path and both keys must be set.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.frr_type is None:
                raise YPYModelError('Key property frr_type is None')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return ('%s/Cisco-IOS-XR-clns-isis-cfg:priority-limit'
                    '[Cisco-IOS-XR-clns-isis-cfg:frr-type = %s]'
                    '[Cisco-IOS-XR-clns-isis-cfg:level = %s]'
                    % (self.parent._common_path, self.frr_type, self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.frr_type, self.level, self.priority)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits.PriorityLimit']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:priority-limits'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # The container has data only when at least one child entry does.
        if not self.is_config():
            return False
        if self.priority_limit is None:
            return False
        return any(child._has_data() for child in self.priority_limit)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.PriorityLimits']['meta_info']
class FrrRemoteLfaPrefixes(object):
    """FRR remote LFA prefix list filter configuration.

    Container for ``FrrRemoteLfaPrefix`` list entries that filter remote
    LFA router IDs through a prefix-list, one entry per IS-IS level.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list; YList tracks parent/name so entries can derive paths.
        self.frr_remote_lfa_prefix = YList()
        self.frr_remote_lfa_prefix.parent = self
        self.frr_remote_lfa_prefix.name = 'frr_remote_lfa_prefix'


    class FrrRemoteLfaPrefix(object):
        """Filter remote LFA router IDs using prefix-list.

        List entry keyed by ``level``; ``prefix_list_name`` (mandatory)
        names the prefix-list applied at that level.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.prefix_list_name = None

        @property
        def _common_path(self):
            # The list key must be present before a path can be built.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            segment = ('/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-prefix'
                       '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'
                       .format(str(self.level)))
            return self.parent._common_path + segment

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(
                leaf is not None
                for leaf in (self.level, self.prefix_list_name)
            )

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-prefixes'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.frr_remote_lfa_prefix is None:
            return False
        return any(entry._has_data() for entry in self.frr_remote_lfa_prefix)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrRemoteLfaPrefixes']['meta_info']
class FrrTiebreakers(object):
    """FRR tiebreakers configuration.

    Container for ``FrrTiebreaker`` list entries that order multiple
    backup paths, one entry per (level, tiebreaker) pair.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list; YList tracks parent/name so entries can derive paths.
        self.frr_tiebreaker = YList()
        self.frr_tiebreaker.parent = self
        self.frr_tiebreaker.name = 'frr_tiebreaker'


    class FrrTiebreaker(object):
        """Configure tiebreaker for multiple backups.

        List entry keyed by ``level`` and ``tiebreaker``; ``index``
        (mandatory, 1..255) gives the preference order among tiebreakers.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.tiebreaker = None
            self.index = None

        @property
        def _common_path(self):
            # Both list keys must be present before a path can be built.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            if self.tiebreaker is None:
                raise YPYModelError('Key property tiebreaker is None')
            segment = ('/Cisco-IOS-XR-clns-isis-cfg:frr-tiebreaker'
                       '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'
                       '[Cisco-IOS-XR-clns-isis-cfg:tiebreaker = {1}]'
                       .format(str(self.level), str(self.tiebreaker)))
            return self.parent._common_path + segment

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(
                leaf is not None
                for leaf in (self.level, self.tiebreaker, self.index)
            )

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers.FrrTiebreaker']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-tiebreakers'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.frr_tiebreaker is None:
            return False
        return any(entry._has_data() for entry in self.frr_tiebreaker)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrTiebreakers']['meta_info']
class FrrUseCandOnlies(object):
    """FRR use candidate only configuration.

    Container for ``FrrUseCandOnly`` list entries that restrict backup
    selection to candidate interfaces, keyed by (frr_type, level).
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list; YList tracks parent/name so entries can derive paths.
        self.frr_use_cand_only = YList()
        self.frr_use_cand_only.parent = self
        self.frr_use_cand_only.name = 'frr_use_cand_only'


    class FrrUseCandOnly(object):
        """Configure use candidate only to exclude interfaces as backup.

        Presence of an entry (keyed by ``frr_type`` and ``level``) is the
        configuration; there are no additional leaves.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.frr_type = None
            self.level = None

        @property
        def _common_path(self):
            # Both list keys must be present before a path can be built.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.frr_type is None:
                raise YPYModelError('Key property frr_type is None')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            segment = ('/Cisco-IOS-XR-clns-isis-cfg:frr-use-cand-only'
                       '[Cisco-IOS-XR-clns-isis-cfg:frr-type = {0}]'
                       '[Cisco-IOS-XR-clns-isis-cfg:level = {1}]'
                       .format(str(self.frr_type), str(self.level)))
            return self.parent._common_path + segment

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(
                leaf is not None
                for leaf in (self.frr_type, self.level)
            )

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies.FrrUseCandOnly']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-use-cand-onlies'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.frr_use_cand_only is None:
            return False
        return any(entry._has_data() for entry in self.frr_use_cand_only)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable.FrrUseCandOnlies']['meta_info']
@property
def _common_path(self):
    """Absolute XML path of the frr-table container under its parent."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-table'
def is_config(self):
    """Return True: this node always represents configuration data."""
    return True
def _has_data(self):
if not self.is_config():
return False
if self.frr_load_sharings is not None and self.frr_load_sharings._has_data():
return True
if self.frr_remote_lfa_prefixes is not None and self.frr_remote_lfa_prefixes._has_data():
return True
if self.frr_tiebreakers is not None and self.frr_tiebreakers._has_data():
return True
if self.frr_use_cand_onlies is not None and self.frr_use_cand_onlies._has_data():
return True
if self.priority_limits is not None and self.priority_limits._has_data():
return True
return False
@staticmethod
def _meta_info():
    """Return the generated meta information for the FrrTable node."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
    return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.FrrTable']['meta_info']
class SpfPrefixPriorities(object):
    """SPF Prefix Priority configuration.

    Container for ``SpfPrefixPriority`` list entries that assign SPF
    priorities to prefixes, keyed by (level, prefix_priority_type).
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list; YList tracks parent/name so entries can derive paths.
        self.spf_prefix_priority = YList()
        self.spf_prefix_priority.parent = self
        self.spf_prefix_priority.name = 'spf_prefix_priority'


    class SpfPrefixPriority(object):
        """Determine SPF priority for prefixes.

        List entry keyed by ``level`` and ``prefix_priority_type``.
        Matching prefixes are selected either by ``access_list_name`` or
        by ``admin_tag`` (1..4294967295).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.prefix_priority_type = None
            self.access_list_name = None
            self.admin_tag = None

        @property
        def _common_path(self):
            # Both list keys must be present before a path can be built.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            if self.prefix_priority_type is None:
                raise YPYModelError('Key property prefix_priority_type is None')
            segment = ('/Cisco-IOS-XR-clns-isis-cfg:spf-prefix-priority'
                       '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'
                       '[Cisco-IOS-XR-clns-isis-cfg:prefix-priority-type = {1}]'
                       .format(str(self.level), str(self.prefix_priority_type)))
            return self.parent._common_path + segment

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(
                leaf is not None
                for leaf in (self.level, self.prefix_priority_type,
                             self.access_list_name, self.admin_tag)
            )

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities.SpfPrefixPriority']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:spf-prefix-priorities'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.spf_prefix_priority is None:
            return False
        return any(entry._has_data() for entry in self.spf_prefix_priority)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SpfPrefixPriorities']['meta_info']
class SummaryPrefixes(object):
    """Summary-prefix configuration.

    Container for ``SummaryPrefix`` list entries that advertise
    summarized IP address prefixes, keyed by the prefix itself.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list; YList tracks parent/name so entries can derive paths.
        self.summary_prefix = YList()
        self.summary_prefix.parent = self
        self.summary_prefix.name = 'summary_prefix'


    class SummaryPrefix(object):
        """Configure IP address prefixes to advertise.

        List entry keyed by ``address_prefix`` (an IPv4 or IPv6 prefix
        string such as ``10.0.0.0/8``).  Optional leaves: ``level``
        (1..2, level in which to summarize) and ``tag`` (1..4294967295).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.address_prefix = None
            self.level = None
            self.tag = None

        @property
        def _common_path(self):
            # The list key must be present before a path can be built.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.address_prefix is None:
                raise YPYModelError('Key property address_prefix is None')
            segment = ('/Cisco-IOS-XR-clns-isis-cfg:summary-prefix'
                       '[Cisco-IOS-XR-clns-isis-cfg:address-prefix = {0}]'
                       .format(str(self.address_prefix)))
            return self.parent._common_path + segment

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(
                leaf is not None
                for leaf in (self.address_prefix, self.level, self.tag)
            )

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes.SummaryPrefix']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:summary-prefixes'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.summary_prefix is None:
            return False
        return any(entry._has_data() for entry in self.summary_prefix)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SummaryPrefixes']['meta_info']
class MicroLoopAvoidance(object):
    """Micro Loop Avoidance configuration.

    Leaves:
      * ``enable`` -- micro-loop avoidance enable setting
        (IsisMicroLoopAvoidanceEnum).
      * ``rib_update_delay`` -- delay in msecs (1000..65535) before
        updating the RIB.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.enable = None
        self.rib_update_delay = None

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:micro-loop-avoidance'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        return any(
            leaf is not None
            for leaf in (self.enable, self.rib_update_delay)
        )

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.MicroLoopAvoidance']['meta_info']
class Ucmp(object):
    """UCMP (UnEqual Cost MultiPath) configuration.

    Members:
      * ``delay_interval`` -- delay in msecs (100..65535) between the
        primary SPF and the UCMP computation.
      * ``enable`` -- UCMP feature enable container (``Ucmp.Enable``).
      * ``exclude_interfaces`` -- interfaces excluded from UCMP path
        computation (``Ucmp.ExcludeInterfaces``).
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.delay_interval = None
        # Child containers are instantiated eagerly and re-parented here.
        self.enable = Isis.Instances.Instance.Afs.Af.AfData.Ucmp.Enable()
        self.enable.parent = self
        self.exclude_interfaces = Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces()
        self.exclude_interfaces.parent = self


    class Enable(object):
        """UCMP feature enable configuration.

        Leaves: ``prefix_list_name`` (prefix-list selecting routes) and
        ``variance`` (101..10000).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.prefix_list_name = None
            self.variance = None

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:enable'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(
                leaf is not None
                for leaf in (self.prefix_list_name, self.variance)
            )

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Ucmp.Enable']['meta_info']


    class ExcludeInterfaces(object):
        """Interfaces excluded from UCMP path computation.

        Container for ``ExcludeInterface`` list entries keyed by
        interface name.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # Keyed list; YList tracks parent/name so entries can derive paths.
            self.exclude_interface = YList()
            self.exclude_interface.parent = self
            self.exclude_interface.name = 'exclude_interface'


        class ExcludeInterface(object):
            """Exclude this interface from UCMP path computation.

            List entry keyed by ``interface_name`` (IOS-XR interface
            name pattern).
            """

            _prefix = 'clns-isis-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.interface_name = None

            @property
            def _common_path(self):
                # The list key must be present before a path can be built.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                if self.interface_name is None:
                    raise YPYModelError('Key property interface_name is None')
                segment = ('/Cisco-IOS-XR-clns-isis-cfg:exclude-interface'
                           '[Cisco-IOS-XR-clns-isis-cfg:interface-name = {0}]'
                           .format(str(self.interface_name)))
                return self.parent._common_path + segment

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                if not self.is_config():
                    return False
                return self.interface_name is not None

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
                return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces.ExcludeInterface']['meta_info']

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:exclude-interfaces'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            if self.exclude_interface is None:
                return False
            return any(entry._has_data() for entry in self.exclude_interface)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Ucmp.ExcludeInterfaces']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:ucmp'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.delay_interval is not None:
            return True
        if self.enable is not None and self.enable._has_data():
            return True
        return self.exclude_interfaces is not None and self.exclude_interfaces._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Ucmp']['meta_info']
class MaxRedistPrefixes(object):
    """Maximum number of redistributed prefixes configuration.

    Container for ``MaxRedistPrefix`` list entries, one per IS-IS level,
    each capping how many redistributed prefixes may appear in the local
    system's LSP.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list; YList tracks parent/name so entries can derive paths.
        self.max_redist_prefix = YList()
        self.max_redist_prefix.parent = self
        self.max_redist_prefix.name = 'max_redist_prefix'


    class MaxRedistPrefix(object):
        """Upper limit on redistributed prefixes in the local LSP.

        List entry keyed by ``level``; ``prefix_limit`` (mandatory,
        1..28000) is the maximum number of prefixes.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.prefix_limit = None

        @property
        def _common_path(self):
            # The list key must be present before a path can be built.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            segment = ('/Cisco-IOS-XR-clns-isis-cfg:max-redist-prefix'
                       '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'
                       .format(str(self.level)))
            return self.parent._common_path + segment

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(
                leaf is not None
                for leaf in (self.level, self.prefix_limit)
            )

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes.MaxRedistPrefix']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:max-redist-prefixes'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.max_redist_prefix is None:
            return False
        return any(entry._has_data() for entry in self.max_redist_prefix)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.MaxRedistPrefixes']['meta_info']
class Propagations(object):
    """Route propagation configuration.

    Container for ``Propagation`` list entries that propagate routes
    between IS-IS levels, keyed by (destination_level, source_level).
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list; YList tracks parent/name so entries can derive paths.
        self.propagation = YList()
        self.propagation.parent = self
        self.propagation.name = 'propagation'


    class Propagation(object):
        """Propagate routes between IS-IS levels.

        List entry keyed by ``destination_level`` (must differ from the
        source) and ``source_level``; ``route_policy_name`` (mandatory)
        limits which routes are propagated.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.destination_level = None
            self.source_level = None
            self.route_policy_name = None

        @property
        def _common_path(self):
            # Both list keys must be present before a path can be built.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.destination_level is None:
                raise YPYModelError('Key property destination_level is None')
            if self.source_level is None:
                raise YPYModelError('Key property source_level is None')
            segment = ('/Cisco-IOS-XR-clns-isis-cfg:propagation'
                       '[Cisco-IOS-XR-clns-isis-cfg:destination-level = {0}]'
                       '[Cisco-IOS-XR-clns-isis-cfg:source-level = {1}]'
                       .format(str(self.destination_level), str(self.source_level)))
            return self.parent._common_path + segment

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(
                leaf is not None
                for leaf in (self.destination_level, self.source_level,
                             self.route_policy_name)
            )

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Propagations.Propagation']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:propagations'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.propagation is None:
            return False
        return any(entry._has_data() for entry in self.propagation)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Propagations']['meta_info']
class Redistributions(object):
"""
Protocol redistribution configuration
.. attribute:: redistribution
Redistribution of other protocols into this IS\-IS instance
**type**\: list of :py:class:`Redistribution <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.redistribution = YList()
self.redistribution.parent = self
self.redistribution.name = 'redistribution'
class Redistribution(object):
    """Redistribution of other protocols into this IS-IS instance.

    List entry keyed by ``protocol_name`` (IsisRedistProtoEnum); per the
    model, OSPFv3 may not be specified for an IPv4 topology and OSPF may
    not be specified for an IPv6 topology.  Child lists/containers carry
    the per-source redistribution options.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Key leaf: the protocol being redistributed.
        self.protocol_name = None
        bgp_list = YList()
        bgp_list.parent = self
        bgp_list.name = 'bgp'
        self.bgp = bgp_list
        # Presence container; stays None until explicitly configured.
        self.connected_or_static_or_rip_or_subscriber_or_mobile = None
        eigrp_list = YList()
        eigrp_list.parent = self
        eigrp_list.name = 'eigrp'
        self.eigrp = eigrp_list
        ospf_list = YList()
        ospf_list.parent = self
        ospf_list.name = 'ospf_or_ospfv3_or_isis_or_application'
        self.ospf_or_ospfv3_or_isis_or_application = ospf_list

    class ConnectedOrStaticOrRipOrSubscriberOrMobile(object):
        """Presence container: connected/static/rip/subscriber/mobile options.

        Leaves (None when unset): levels, metric, metric_type,
        ospf_route_type (only meaningful when the protocol is OSPF),
        route_policy_name.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # Presence container: merely existing is significant data.
            self._is_presence = True
            self.levels = None
            self.metric = None
            self.metric_type = None
            self.ospf_route_type = None
            self.route_policy_name = None

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:connected-or-static-or-rip-or-subscriber-or-mobile' % (parent._common_path,)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            # Presence container always reports data once instantiated.
            if self._is_presence:
                return True
            leaves = (self.levels, self.metric, self.metric_type,
                      self.ospf_route_type, self.route_policy_name)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile']['meta_info']

    class OspfOrOspfv3OrIsisOrApplication(object):
        """ospf / ospfv3 / isis / application redistribution options.

        List entry keyed by ``instance_name`` (protocol instance
        identifier).  Leaves (None when unset): levels, metric,
        metric_type, ospf_route_type, route_policy_name.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # Key leaf: protocol instance identifier.
            self.instance_name = None
            self.levels = None
            self.metric = None
            self.metric_type = None
            self.ospf_route_type = None
            self.route_policy_name = None

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.instance_name is None:
                raise YPYModelError('Key property instance_name is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:ospf-or-ospfv3-or-isis-or-application[Cisco-IOS-XR-clns-isis-cfg:instance-name = %s]' % (parent._common_path, self.instance_name)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.instance_name, self.levels, self.metric,
                      self.metric_type, self.ospf_route_type,
                      self.route_policy_name)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication']['meta_info']

    class Bgp(object):
        """BGP redistribution options.

        List entry keyed by the AS number split into ``as_xx``/``as_yy``
        (XX.YY form); the model requires at least one half non-zero.
        Leaves (None when unset): levels, metric, metric_type,
        ospf_route_type, route_policy_name.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # Key leaves: the two halves of the XX.YY AS number.
            self.as_xx = None
            self.as_yy = None
            self.levels = None
            self.metric = None
            self.metric_type = None
            self.ospf_route_type = None
            self.route_policy_name = None

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.as_xx is None:
                raise YPYModelError('Key property as_xx is None')
            if self.as_yy is None:
                raise YPYModelError('Key property as_yy is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:bgp[Cisco-IOS-XR-clns-isis-cfg:as-xx = %s][Cisco-IOS-XR-clns-isis-cfg:as-yy = %s]' % (parent._common_path, self.as_xx, self.as_yy)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.as_xx, self.as_yy, self.levels, self.metric,
                      self.metric_type, self.ospf_route_type,
                      self.route_policy_name)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Bgp']['meta_info']

    class Eigrp(object):
        """EIGRP redistribution options.

        List entry keyed by ``as_zz`` (EIGRP AS number, 1..65535).
        Leaves (None when unset): levels, metric, metric_type,
        ospf_route_type, route_policy_name.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # Key leaf: EIGRP AS number.
            self.as_zz = None
            self.levels = None
            self.metric = None
            self.metric_type = None
            self.ospf_route_type = None
            self.route_policy_name = None

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.as_zz is None:
                raise YPYModelError('Key property as_zz is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:eigrp[Cisco-IOS-XR-clns-isis-cfg:as-zz = %s]' % (parent._common_path, self.as_zz)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.as_zz, self.levels, self.metric,
                      self.metric_type, self.ospf_route_type,
                      self.route_policy_name)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution.Eigrp']['meta_info']

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.protocol_name is None:
            raise YPYModelError('Key property protocol_name is None')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:redistribution[Cisco-IOS-XR-clns-isis-cfg:protocol-name = %s]' % (parent._common_path, self.protocol_name)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.protocol_name is not None:
            return True
        # Any child list entry with data makes this entry non-empty.
        for child_list in (self.bgp, self.eigrp,
                           self.ospf_or_ospfv3_or_isis_or_application):
            if child_list is not None and any(child._has_data() for child in child_list):
                return True
        presence = self.connected_or_static_or_rip_or_subscriber_or_mobile
        return presence is not None and presence._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Redistributions.Redistribution']['meta_info']
@property
def _common_path(self):
    """Absolute XPath-like path of this redistributions container.

    Requires ``parent`` to be set; raises YPYModelError otherwise.
    """
    parent = self.parent
    if parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-clns-isis-cfg:redistributions' % (parent._common_path,)
def is_config(self):
    """Return True: this node models configuration (not operational) data."""
    return True
def _has_data(self):
    """Return True when any redistribution list entry carries data."""
    if not self.is_config():
        return False
    entries = self.redistribution
    if entries is not None:
        return any(entry._has_data() for entry in entries)
    return False
@staticmethod
def _meta_info():
    """Return the generated meta-model entry for this container."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
    return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Redistributions']['meta_info']
class SpfPeriodicIntervals(object):
    """Periodic SPF configuration.

    Holds the keyed list ``spf_periodic_interval`` of maximum intervals
    between SPF runs, one entry per IS-IS level.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        interval_list = YList()
        interval_list.parent = self
        interval_list.name = 'spf_periodic_interval'
        self.spf_periodic_interval = interval_list

    class SpfPeriodicInterval(object):
        """Maximum interval between SPF runs for one level.

        Leaves:
            level: key; level to which this entry applies.
            periodic_interval: maximum seconds between SPF runs
                (0..3600, mandatory per the model).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.periodic_interval = None

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:spf-periodic-interval[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (parent._common_path, self.level)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None
                       for leaf in (self.level, self.periodic_interval))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals.SpfPeriodicInterval']['meta_info']

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:spf-periodic-intervals' % (parent._common_path,)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        entries = self.spf_periodic_interval
        if entries is not None:
            return any(entry._has_data() for entry in entries)
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SpfPeriodicIntervals']['meta_info']
class SpfIntervals(object):
    """SPF-interval configuration.

    Holds the keyed list ``spf_interval`` of route-calculation
    scheduling parameters, one entry per IS-IS level.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        interval_list = YList()
        interval_list.parent = self
        interval_list.name = 'spf_interval'
        self.spf_interval = interval_list

    class SpfInterval(object):
        """Route-calculation scheduling parameters for one level.

        Leaves (milliseconds, 0..120000, None when unset):
            level: key; level to which this entry applies.
            initial_wait: initial wait before running a calculation.
            maximum_wait: maximum wait before running a calculation.
            secondary_wait: secondary wait before running a calculation.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.initial_wait = None
            self.maximum_wait = None
            self.secondary_wait = None

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:spf-interval[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (parent._common_path, self.level)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.level, self.initial_wait,
                      self.maximum_wait, self.secondary_wait)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals.SpfInterval']['meta_info']

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:spf-intervals' % (parent._common_path,)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        entries = self.spf_interval
        if entries is not None:
            return any(entry._has_data() for entry in entries)
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.SpfIntervals']['meta_info']
class MonitorConvergence(object):
    """Enable convergence monitoring.

    Leaves (None when unset):
        enable: Empty leaf enabling convergence monitoring.
        prefix_list (str): prefix-list name enabling per-prefix monitoring.
        track_ip_frr: Empty leaf enabling IP-FRR convergence tracking.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.enable = None
        self.prefix_list = None
        self.track_ip_frr = None

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:monitor-convergence' % (parent._common_path,)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        leaves = (self.enable, self.prefix_list, self.track_ip_frr)
        return any(leaf is not None for leaf in leaves)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.MonitorConvergence']['meta_info']
class DefaultInformation(object):
    """Control origination of a default route, optionally via a policy.

    Per the model, when no policy is specified the default route is
    advertised with zero cost in level 2 only.

    Leaves (None when unset):
        external: Empty leaf; originate the default prefix as external.
        policy_name (str): policy controlling default origination.
        use_policy (bool): whether origination is policy-controlled.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.external = None
        self.policy_name = None
        self.use_policy = None

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:default-information' % (parent._common_path,)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        leaves = (self.external, self.policy_name, self.use_policy)
        return any(leaf is not None for leaf in leaves)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.DefaultInformation']['meta_info']
class AdminDistances(object):
    """Per-route administrative distance configuration.

    Holds the keyed list ``admin_distance``; each entry applies a
    distance to all routes from a source prefix, or only those matching
    an optional prefix list.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        distance_list = YList()
        distance_list.parent = self
        distance_list.name = 'admin_distance'
        self.admin_distance = distance_list

    class AdminDistance(object):
        """One administrative-distance rule.

        Leaves:
            address_prefix: key; IPv4 or IPv6 route source prefix.
            distance: administrative distance (1..255, mandatory).
            prefix_list: optional prefix list restricting the rule.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.address_prefix = None
            self.distance = None
            self.prefix_list = None

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.address_prefix is None:
                raise YPYModelError('Key property address_prefix is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:admin-distance[Cisco-IOS-XR-clns-isis-cfg:address-prefix = %s]' % (parent._common_path, self.address_prefix)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.address_prefix, self.distance, self.prefix_list)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.AdminDistances.AdminDistance']['meta_info']

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:admin-distances' % (parent._common_path,)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        entries = self.admin_distance
        if entries is not None:
            return any(entry._has_data() for entry in entries)
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.AdminDistances']['meta_info']
class Ispf(object):
    """ISPF configuration: wraps the per-level enable/disable states."""

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        states = Isis.Instances.Instance.Afs.Af.AfData.Ispf.States()
        states.parent = self
        self.states = states

    class States(object):
        """Container for the keyed list of per-level ISPF states."""

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            state_list = YList()
            state_list.parent = self
            state_list.name = 'state'
            self.state = state_list

        class State(object):
            """Enable/disable ISPF at one level.

            Leaves:
                level: key; level to which this entry applies.
                state: ISPF state (mandatory per the model).
            """

            _prefix = 'clns-isis-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.level = None
                self.state = None

            @property
            def _common_path(self):
                parent = self.parent
                if parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                if self.level is None:
                    raise YPYModelError('Key property level is None')
                return '%s/Cisco-IOS-XR-clns-isis-cfg:state[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (parent._common_path, self.level)

            def is_config(self):
                """Return True: this object models configuration data."""
                return True

            def _has_data(self):
                if not self.is_config():
                    return False
                return any(leaf is not None
                           for leaf in (self.level, self.state))

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
                return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Ispf.States.State']['meta_info']

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:states' % (parent._common_path,)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            entries = self.state
            if entries is not None:
                return any(entry._has_data() for entry in entries)
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Ispf.States']['meta_info']

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:ispf' % (parent._common_path,)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        states = self.states
        return states is not None and states._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Ispf']['meta_info']
class MplsLdpGlobal(object):
    """MPLS LDP configuration.

    Per the model, this configuration is only applied for the
    IPv4-unicast address-family.

    Leaves (None when unset):
        auto_config (bool): when True, LDP is enabled on all IS-IS
            interfaces enabled for this address-family.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.auto_config = None

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:mpls-ldp-global' % (parent._common_path,)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        return self.auto_config is not None

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.MplsLdpGlobal']['meta_info']
class Mpls(object):
    """MPLS configuration.

    Per the model, this configuration is only applied for the
    IPv4-unicast address-family.

    Leaves (None when unset):
        igp_intact: Empty leaf; install TE and non-TE nexthops in the RIB.
        level: levels at which MPLS is enabled.
        multicast_intact: Empty leaf; install non-TE nexthops for multicast.
        router_id: container for the TE stable IP address of the system.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.igp_intact = None
        self.level = None
        self.multicast_intact = None
        router_id = Isis.Instances.Instance.Afs.Af.AfData.Mpls.RouterId()
        router_id.parent = self
        self.router_id = router_id

    class RouterId(object):
        """Traffic Engineering stable IP address for the system.

        Per the model, precisely one of ``address`` and
        ``interface_name`` must be specified.

        Leaves (None when unset):
            address (str): IPv4 address to use as the router ID.
            interface_name (str): Loopback interface whose address is
                used as the router ID.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.address = None
            self.interface_name = None

        @property
        def _common_path(self):
            parent = self.parent
            if parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:router-id' % (parent._common_path,)

        def is_config(self):
            """Return True: this object models configuration data."""
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None
                       for leaf in (self.address, self.interface_name))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
            return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Mpls.RouterId']['meta_info']

    @property
    def _common_path(self):
        parent = self.parent
        if parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:mpls' % (parent._common_path,)

    def is_config(self):
        """Return True: this object models configuration data."""
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if any(leaf is not None
               for leaf in (self.igp_intact, self.level, self.multicast_intact)):
            return True
        router_id = self.router_id
        return router_id is not None and router_id._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta_mod
        return meta_mod._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Mpls']['meta_info']
class Metrics(object):
    """
    Metric configuration.

    .. attribute:: metric

        Per-level metric entries. With narrow metric-style only values
        <1-63> are valid; with wide metric-style values <1-16777215>
        are valid. All routers exclude links carrying the maximum wide
        metric (16777215) from their SPF.
        **type**: list of :py:class:`Metric <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Metrics.Metric>`

    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of per-level Metric entries.
        self.metric = YList()
        self.metric.parent = self
        self.metric.name = 'metric'

    class Metric(object):
        """
        One per-level metric entry.

        .. attribute:: level  <key>

            Level to which configuration applies
            **type**: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: metric

            Allowed metric: <1-63> for narrow, <1-16777215> for wide
            metric-style; MetricEnum.MAXIMUM marks a link excluded
            from SPF. Mandatory.
            **type**: int or :py:class:`MetricEnum`

        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.metric = None

        class MetricEnum(Enum):
            """
            MetricEnum

            Allowed metric: <1-63> for narrow, <1-16777215> for wide.

            .. data:: MAXIMUM = 16777215

                Maximum wide metric; all routers exclude this link
                from their SPF

            """

            MAXIMUM = 16777215

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
                return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Metrics.Metric.MetricEnum']

        @property
        def _common_path(self):
            """XPath of this keyed list entry, derived from the parent's path."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            key = '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'.format(self.level)
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:metric' + key

        def is_config(self):
            """Return True: this node holds configuration data."""
            return True

        def _has_data(self):
            """Report whether any leaf of this entry has been assigned."""
            if not self.is_config():
                return False
            return any(v is not None for v in (self.level, self.metric))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
            return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Metrics.Metric']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container, derived from the parent's path."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:metrics'

    def is_config(self):
        """Return True: this node holds configuration data."""
        return True

    def _has_data(self):
        """Report whether any child Metric entry carries data."""
        if not self.is_config():
            return False
        if self.metric is None:
            return False
        return any(entry._has_data() for entry in self.metric)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
        return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Metrics']['meta_info']
class Weights(object):
    """
    Weight configuration.

    .. attribute:: weight

        Per-level weight entries used for interface load balancing
        **type**: list of :py:class:`Weight <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.AfData.Weights.Weight>`

    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of per-level Weight entries.
        self.weight = YList()
        self.weight.parent = self
        self.weight.name = 'weight'

    class Weight(object):
        """
        Weight configuration under interface for load balancing.

        .. attribute:: level  <key>

            Level to which configuration applies
            **type**: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: weight

            Interface weight for load balancing, range 1..16777214.
            Mandatory.
            **type**: int

        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.weight = None

        @property
        def _common_path(self):
            """XPath of this keyed list entry, derived from the parent's path."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            key = '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'.format(self.level)
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:weight' + key

        def is_config(self):
            """Return True: this node holds configuration data."""
            return True

        def _has_data(self):
            """Report whether any leaf of this entry has been assigned."""
            if not self.is_config():
                return False
            return any(v is not None for v in (self.level, self.weight))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
            return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Weights.Weight']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container, derived from the parent's path."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:weights'

    def is_config(self):
        """Return True: this node holds configuration data."""
        return True

    def _has_data(self):
        """Report whether any child Weight entry carries data."""
        if not self.is_config():
            return False
        if self.weight is None:
            return False
        return any(entry._has_data() for entry in self.weight)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
        return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.AfData.Weights']['meta_info']
@property
def _common_path(self):
    """XPath of this af-data container, derived from the parent's path."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    base = self.parent._common_path
    return base + '/Cisco-IOS-XR-clns-isis-cfg:af-data'
def is_config(self):
    """Return True: this node holds configuration data, never operational state."""
    return True
def _has_data(self):
    """Report whether any leaf or child container under af-data is set."""
    if not self.is_config():
        return False
    # af-data is a YANG presence container: existing at all counts as data.
    if self._is_presence:
        return True
    # Scalar leaves count as data whenever they are non-None.
    leaves = (
        self.adjacency_check,
        self.advertise_passive_only,
        self.apply_weight,
        self.attached_bit,
        self.default_admin_distance,
        self.ignore_attached_bit,
        self.maximum_paths,
        self.route_source_first_hop,
        self.single_topology,
        self.topology_id,
    )
    if any(leaf is not None for leaf in leaves):
        return True
    # Child containers only count if they actually carry data themselves.
    containers = (
        self.admin_distances,
        self.default_information,
        self.frr_table,
        self.ispf,
        self.max_redist_prefixes,
        self.metric_styles,
        self.metrics,
        self.micro_loop_avoidance,
        self.monitor_convergence,
        self.mpls,
        self.mpls_ldp_global,
        self.propagations,
        self.redistributions,
        self.segment_routing,
        self.spf_intervals,
        self.spf_periodic_intervals,
        self.spf_prefix_priorities,
        self.summary_prefixes,
        self.ucmp,
        self.weights,
    )
    return any(c is not None and c._has_data() for c in containers)
@staticmethod
def _meta_info():
    """Look up this node's entry in the generated YDK meta-model table."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
    return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.AfData']['meta_info']
class TopologyName(object):
"""
keys\: topology\-name
.. attribute:: topology_name <key>
Topology Name
**type**\: str
**range:** 0..32
.. attribute:: adjacency_check
Suppress check for consistent AF support on received IIHs
**type**\: :py:class:`IsisAdjCheckEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisAdjCheckEnum>`
.. attribute:: admin_distances
Per\-route administrative distanceconfiguration
**type**\: :py:class:`AdminDistances <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances>`
.. attribute:: advertise_passive_only
If enabled, advertise prefixes of passive interfaces only
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: apply_weight
Apply weights to UCMP or ECMP only
**type**\: :py:class:`IsisApplyWeightEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisApplyWeightEnum>`
.. attribute:: attached_bit
Set the attached bit in this router's level 1 System LSP
**type**\: :py:class:`IsisAttachedBitEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisAttachedBitEnum>`
.. attribute:: default_admin_distance
Default IS\-IS administrative distance configuration
**type**\: int
**range:** 1..255
.. attribute:: default_information
Control origination of a default route with the option of using a policy. If no policy is specified the default route is advertised with zero cost in level 2 only
**type**\: :py:class:`DefaultInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.DefaultInformation>`
.. attribute:: frr_table
Fast\-ReRoute configuration
**type**\: :py:class:`FrrTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable>`
.. attribute:: ignore_attached_bit
If TRUE, Ignore other routers attached bit
**type**\: bool
.. attribute:: ispf
ISPF configuration
**type**\: :py:class:`Ispf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Ispf>`
.. attribute:: max_redist_prefixes
Maximum number of redistributed prefixesconfiguration
**type**\: :py:class:`MaxRedistPrefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes>`
.. attribute:: maximum_paths
Maximum number of active parallel paths per route
**type**\: int
**range:** 1..64
.. attribute:: metric_styles
Metric\-style configuration
**type**\: :py:class:`MetricStyles <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles>`
.. attribute:: metrics
Metric configuration
**type**\: :py:class:`Metrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Metrics>`
.. attribute:: micro_loop_avoidance
Micro Loop Avoidance configuration
**type**\: :py:class:`MicroLoopAvoidance <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.MicroLoopAvoidance>`
.. attribute:: monitor_convergence
Enable convergence monitoring
**type**\: :py:class:`MonitorConvergence <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.MonitorConvergence>`
.. attribute:: mpls
MPLS configuration. MPLS configuration will only be applied for the IPv4\-unicast address\-family
**type**\: :py:class:`Mpls <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Mpls>`
.. attribute:: mpls_ldp_global
MPLS LDP configuration. MPLS LDP configuration will only be applied for the IPv4\-unicast address\-family
**type**\: :py:class:`MplsLdpGlobal <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.MplsLdpGlobal>`
.. attribute:: propagations
Route propagation configuration
**type**\: :py:class:`Propagations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Propagations>`
.. attribute:: redistributions
Protocol redistribution configuration
**type**\: :py:class:`Redistributions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions>`
.. attribute:: route_source_first_hop
If TRUE, routes will be installed with the IP address of the first\-hop node as the source instead of the originating node
**type**\: bool
.. attribute:: segment_routing
Enable Segment Routing configuration
**type**\: :py:class:`SegmentRouting <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting>`
.. attribute:: single_topology
Run IPv6 Unicast using the standard (IPv4 Unicast) topology
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: spf_intervals
SPF\-interval configuration
**type**\: :py:class:`SpfIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals>`
.. attribute:: spf_periodic_intervals
Peoridic SPF configuration
**type**\: :py:class:`SpfPeriodicIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals>`
.. attribute:: spf_prefix_priorities
SPF Prefix Priority configuration
**type**\: :py:class:`SpfPrefixPriorities <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities>`
.. attribute:: summary_prefixes
Summary\-prefix configuration
**type**\: :py:class:`SummaryPrefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes>`
.. attribute:: topology_id
Set the topology ID for a named (non\-default) topology. This object must be set before any other configuration is supplied for a named (non\-default) topology , and must be the last configuration object to be removed. This item should not be supplied for the non\-named default topologies
**type**\: int
**range:** 6..4095
.. attribute:: ucmp
UCMP (UnEqual Cost MultiPath) configuration
**type**\: :py:class:`Ucmp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp>`
.. attribute:: weights
Weight configuration
**type**\: :py:class:`Weights <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Weights>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Shorthand for this node's generated sibling container classes.
    _cls = Isis.Instances.Instance.Afs.Af.TopologyName
    # Scalar leaves start unset (None) until configured.
    self.topology_name = None
    self.adjacency_check = None
    self.advertise_passive_only = None
    self.apply_weight = None
    self.attached_bit = None
    self.default_admin_distance = None
    self.ignore_attached_bit = None
    self.maximum_paths = None
    self.route_source_first_hop = None
    self.single_topology = None
    self.topology_id = None
    # Container children are always instantiated and parent-linked.
    self.admin_distances = _cls.AdminDistances()
    self.admin_distances.parent = self
    self.default_information = _cls.DefaultInformation()
    self.default_information.parent = self
    self.frr_table = _cls.FrrTable()
    self.frr_table.parent = self
    self.ispf = _cls.Ispf()
    self.ispf.parent = self
    self.max_redist_prefixes = _cls.MaxRedistPrefixes()
    self.max_redist_prefixes.parent = self
    self.metric_styles = _cls.MetricStyles()
    self.metric_styles.parent = self
    self.metrics = _cls.Metrics()
    self.metrics.parent = self
    self.micro_loop_avoidance = _cls.MicroLoopAvoidance()
    self.micro_loop_avoidance.parent = self
    self.monitor_convergence = _cls.MonitorConvergence()
    self.monitor_convergence.parent = self
    self.mpls = _cls.Mpls()
    self.mpls.parent = self
    self.mpls_ldp_global = _cls.MplsLdpGlobal()
    self.mpls_ldp_global.parent = self
    self.propagations = _cls.Propagations()
    self.propagations.parent = self
    self.redistributions = _cls.Redistributions()
    self.redistributions.parent = self
    self.segment_routing = _cls.SegmentRouting()
    self.segment_routing.parent = self
    self.spf_intervals = _cls.SpfIntervals()
    self.spf_intervals.parent = self
    self.spf_periodic_intervals = _cls.SpfPeriodicIntervals()
    self.spf_periodic_intervals.parent = self
    self.spf_prefix_priorities = _cls.SpfPrefixPriorities()
    self.spf_prefix_priorities.parent = self
    self.summary_prefixes = _cls.SummaryPrefixes()
    self.summary_prefixes.parent = self
    self.ucmp = _cls.Ucmp()
    self.ucmp.parent = self
    self.weights = _cls.Weights()
    self.weights.parent = self
class SegmentRouting(object):
    """
    Enable Segment Routing configuration.

    .. attribute:: mpls

        Prefer segment routing labels over LDP labels
        **type**: :py:class:`IsisLabelPreferenceEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisLabelPreferenceEnum>`

    .. attribute:: prefix_sid_map

        Segment Routing prefix SID map configuration
        **type**: :py:class:`PrefixSidMap <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting.PrefixSidMap>`

    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.mpls = None
        self.prefix_sid_map = Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting.PrefixSidMap()
        self.prefix_sid_map.parent = self

    class PrefixSidMap(object):
        """
        Segment Routing prefix SID map configuration.

        .. attribute:: advertise_local

            Enable advertising the local prefix SID map
            **type**: :py:class:`Empty <ydk.types.Empty>`

        .. attribute:: receive

            True to use remote prefix SID map advertisements,
            False to ignore them
            **type**: bool

        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.advertise_local = None
            self.receive = None

        @property
        def _common_path(self):
            """XPath of this container, derived from the parent's path."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:prefix-sid-map'

        def is_config(self):
            """Return True: this node holds configuration data."""
            return True

        def _has_data(self):
            """Report whether any leaf of this node has been assigned."""
            if not self.is_config():
                return False
            return any(v is not None for v in (self.advertise_local, self.receive))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
            return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting.PrefixSidMap']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container, derived from the parent's path."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:segment-routing'

    def is_config(self):
        """Return True: this node holds configuration data."""
        return True

    def _has_data(self):
        """Report whether the mpls leaf or the prefix_sid_map child is set."""
        if not self.is_config():
            return False
        if self.mpls is not None:
            return True
        return self.prefix_sid_map is not None and self.prefix_sid_map._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
        return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SegmentRouting']['meta_info']
class MetricStyles(object):
    """
    Metric-style configuration.

    .. attribute:: metric_style

        Per-level configuration of the metric style used in LSPs
        **type**: list of :py:class:`MetricStyle <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles.MetricStyle>`

    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of per-level MetricStyle entries.
        self.metric_style = YList()
        self.metric_style.parent = self
        self.metric_style.name = 'metric_style'

    class MetricStyle(object):
        """
        Configuration of metric style in LSPs.

        .. attribute:: level  <key>

            Level to which configuration applies
            **type**: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: style

            Metric style
            **type**: :py:class:`IsisMetricStyleEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisMetricStyleEnum>`

        .. attribute:: transition_state

            Transition state
            **type**: :py:class:`IsisMetricStyleTransitionEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisMetricStyleTransitionEnum>`

        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.style = None
            self.transition_state = None

        @property
        def _common_path(self):
            """XPath of this keyed list entry, derived from the parent's path."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            key = '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'.format(self.level)
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:metric-style' + key

        def is_config(self):
            """Return True: this node holds configuration data."""
            return True

        def _has_data(self):
            """Report whether any leaf of this entry has been assigned."""
            if not self.is_config():
                return False
            return any(v is not None for v in (self.level, self.style, self.transition_state))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
            return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles.MetricStyle']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container, derived from the parent's path."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:metric-styles'

    def is_config(self):
        """Return True: this node holds configuration data."""
        return True

    def _has_data(self):
        """Report whether any child MetricStyle entry carries data."""
        if not self.is_config():
            return False
        if self.metric_style is None:
            return False
        return any(entry._has_data() for entry in self.metric_style)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
        return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.MetricStyles']['meta_info']
class FrrTable(object):
"""
Fast\-ReRoute configuration
.. attribute:: frr_load_sharings
Load share prefixes across multiple backups
**type**\: :py:class:`FrrLoadSharings <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings>`
.. attribute:: frr_remote_lfa_prefixes
FRR remote LFA prefix list filter configuration
**type**\: :py:class:`FrrRemoteLfaPrefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes>`
.. attribute:: frr_tiebreakers
FRR tiebreakers configuration
**type**\: :py:class:`FrrTiebreakers <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers>`
.. attribute:: frr_use_cand_onlies
FRR use candidate only configuration
**type**\: :py:class:`FrrUseCandOnlies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies>`
.. attribute:: priority_limits
FRR prefix\-limit configuration
**type**\: :py:class:`PriorityLimits <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Shorthand for this node's generated sibling container classes.
    _cls = Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable
    # Container children are always instantiated and parent-linked.
    self.frr_load_sharings = _cls.FrrLoadSharings()
    self.frr_load_sharings.parent = self
    self.frr_remote_lfa_prefixes = _cls.FrrRemoteLfaPrefixes()
    self.frr_remote_lfa_prefixes.parent = self
    self.frr_tiebreakers = _cls.FrrTiebreakers()
    self.frr_tiebreakers.parent = self
    self.frr_use_cand_onlies = _cls.FrrUseCandOnlies()
    self.frr_use_cand_onlies.parent = self
    self.priority_limits = _cls.PriorityLimits()
    self.priority_limits.parent = self
class FrrLoadSharings(object):
    """
    Load share prefixes across multiple backups.

    .. attribute:: frr_load_sharing

        Per-level switches disabling FRR load sharing
        **type**: list of :py:class:`FrrLoadSharing <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings.FrrLoadSharing>`

    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of per-level FrrLoadSharing entries.
        self.frr_load_sharing = YList()
        self.frr_load_sharing.parent = self
        self.frr_load_sharing.name = 'frr_load_sharing'

    class FrrLoadSharing(object):
        """
        Disable load sharing.

        .. attribute:: level  <key>

            Level to which configuration applies
            **type**: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: load_sharing

            Load sharing. Mandatory.
            **type**: :py:class:`IsisfrrLoadSharingEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisfrrLoadSharingEnum>`

        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.load_sharing = None

        @property
        def _common_path(self):
            """XPath of this keyed list entry, derived from the parent's path."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            key = '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'.format(self.level)
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-load-sharing' + key

        def is_config(self):
            """Return True: this node holds configuration data."""
            return True

        def _has_data(self):
            """Report whether any leaf of this entry has been assigned."""
            if not self.is_config():
                return False
            return any(v is not None for v in (self.level, self.load_sharing))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
            return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings.FrrLoadSharing']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container, derived from the parent's path."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-load-sharings'

    def is_config(self):
        """Return True: this node holds configuration data."""
        return True

    def _has_data(self):
        """Report whether any child FrrLoadSharing entry carries data."""
        if not self.is_config():
            return False
        if self.frr_load_sharing is None:
            return False
        return any(entry._has_data() for entry in self.frr_load_sharing)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
        return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrLoadSharings']['meta_info']
class PriorityLimits(object):
    """
    FRR prefix-limit configuration.

    .. attribute:: priority_limit

        Entries limiting backup computation up to a prefix priority
        **type**: list of :py:class:`PriorityLimit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits.PriorityLimit>`

    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of PriorityLimit entries (keys: frr_type, level).
        self.priority_limit = YList()
        self.priority_limit.parent = self
        self.priority_limit.name = 'priority_limit'

    class PriorityLimit(object):
        """
        Limit backup computation up to the prefix priority.

        .. attribute:: frr_type  <key>

            Computation type
            **type**: :py:class:`IsisfrrEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisfrrEnum>`

        .. attribute:: level  <key>

            Level to which configuration applies
            **type**: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: priority

            Compute for all prefixes up to the specified priority.
            Mandatory.
            **type**: :py:class:`IsisPrefixPriorityEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisPrefixPriorityEnum>`

        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.frr_type = None
            self.level = None
            self.priority = None

        @property
        def _common_path(self):
            """XPath of this two-key list entry, derived from the parent's path."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.frr_type is None:
                raise YPYModelError('Key property frr_type is None')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            keys = '[Cisco-IOS-XR-clns-isis-cfg:frr-type = {0}][Cisco-IOS-XR-clns-isis-cfg:level = {1}]'.format(self.frr_type, self.level)
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:priority-limit' + keys

        def is_config(self):
            """Return True: this node holds configuration data."""
            return True

        def _has_data(self):
            """Report whether any leaf of this entry has been assigned."""
            if not self.is_config():
                return False
            return any(v is not None for v in (self.frr_type, self.level, self.priority))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
            return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits.PriorityLimit']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container, derived from the parent's path."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:priority-limits'

    def is_config(self):
        """Return True: this node holds configuration data."""
        return True

    def _has_data(self):
        """Report whether any child PriorityLimit entry carries data."""
        if not self.is_config():
            return False
        if self.priority_limit is None:
            return False
        return any(entry._has_data() for entry in self.priority_limit)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
        return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.PriorityLimits']['meta_info']
class FrrRemoteLfaPrefixes(object):
    """
    FRR remote LFA prefix list filter configuration.

    .. attribute:: frr_remote_lfa_prefix

        Per-level prefix-list filters for remote LFA router IDs
        **type**: list of :py:class:`FrrRemoteLfaPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix>`

    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of per-level FrrRemoteLfaPrefix entries.
        self.frr_remote_lfa_prefix = YList()
        self.frr_remote_lfa_prefix.parent = self
        self.frr_remote_lfa_prefix.name = 'frr_remote_lfa_prefix'

    class FrrRemoteLfaPrefix(object):
        """
        Filter remote LFA router IDs using a prefix-list.

        .. attribute:: level  <key>

            Level to which configuration applies
            **type**: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: prefix_list_name

            Name of the prefix list. Mandatory.
            **type**: str

        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.prefix_list_name = None

        @property
        def _common_path(self):
            """XPath of this keyed list entry, derived from the parent's path."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            key = '[Cisco-IOS-XR-clns-isis-cfg:level = {0}]'.format(self.level)
            return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-prefix' + key

        def is_config(self):
            """Return True: this node holds configuration data."""
            return True

        def _has_data(self):
            """Report whether any leaf of this entry has been assigned."""
            if not self.is_config():
                return False
            return any(v is not None for v in (self.level, self.prefix_list_name))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
            return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes.FrrRemoteLfaPrefix']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container, derived from the parent's path."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-prefixes'

    def is_config(self):
        """Return True: this node holds configuration data."""
        return True

    def _has_data(self):
        """Report whether any child FrrRemoteLfaPrefix entry carries data."""
        if not self.is_config():
            return False
        if self.frr_remote_lfa_prefix is None:
            return False
        return any(entry._has_data() for entry in self.frr_remote_lfa_prefix)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg
        return _Cisco_IOS_XR_clns_isis_cfg._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrRemoteLfaPrefixes']['meta_info']
class FrrTiebreakers(object):
    """
    FRR tiebreakers configuration.

    Container holding the keyed list ``frr_tiebreaker`` that orders
    multiple backup candidates.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of FrrTiebreaker entries, keyed by (level, tiebreaker).
        self.frr_tiebreaker = YList()
        self.frr_tiebreaker.parent = self
        self.frr_tiebreaker.name = 'frr_tiebreaker'

    class FrrTiebreaker(object):
        """
        Configure a tiebreaker for multiple backups.

        Keyed by ``level`` (IsisInternalLevelEnum) and ``tiebreaker``
        (IsisfrrTiebreakerEnum); carries the mandatory ``index``
        (1..255) preference order.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None  # key: level the tiebreaker applies to
            self.tiebreaker = None  # key: which tiebreaker is configured
            self.index = None  # mandatory: preference order, 1..255

        @property
        def _common_path(self):
            # Parent back-reference and both list keys are required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            if self.tiebreaker is None:
                raise YPYModelError('Key property tiebreaker is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-tiebreaker[Cisco-IOS-XR-clns-isis-cfg:level = %s][Cisco-IOS-XR-clns-isis-cfg:tiebreaker = %s]' % (self.parent._common_path, self.level, self.tiebreaker)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Entry has data when any leaf is populated.
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.level, self.tiebreaker, self.index))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers.FrrTiebreaker']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-tiebreakers' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Container has data when any child list entry has data.
        if not self.is_config():
            return False
        if self.frr_tiebreaker is None:
            return False
        return any(entry._has_data() for entry in self.frr_tiebreaker)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrTiebreakers']['meta_info']
class FrrUseCandOnlies(object):
    """
    FRR use-candidate-only configuration.

    Container holding the keyed list ``frr_use_cand_only`` which
    excludes interfaces as backup candidates.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of FrrUseCandOnly entries, keyed by (frr_type, level).
        self.frr_use_cand_only = YList()
        self.frr_use_cand_only.parent = self
        self.frr_use_cand_only.name = 'frr_use_cand_only'

    class FrrUseCandOnly(object):
        """
        Configure use-candidate-only to exclude interfaces as backup.

        Keyed by ``frr_type`` (IsisfrrEnum, the computation type) and
        ``level`` (IsisInternalLevelEnum).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.frr_type = None  # key: computation type
            self.level = None  # key: level the setting applies to

        @property
        def _common_path(self):
            # Parent back-reference and both list keys are required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.frr_type is None:
                raise YPYModelError('Key property frr_type is None')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-use-cand-only[Cisco-IOS-XR-clns-isis-cfg:frr-type = %s][Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.frr_type, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Entry has data when any leaf is populated.
            if not self.is_config():
                return False
            return self.frr_type is not None or self.level is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies.FrrUseCandOnly']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-use-cand-onlies' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Container has data when any child list entry has data.
        if not self.is_config():
            return False
        if self.frr_use_cand_only is None:
            return False
        return any(entry._has_data() for entry in self.frr_use_cand_only)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable.FrrUseCandOnlies']['meta_info']
@property
def _common_path(self):
    """Return the XPath of this FRR table container, derived from the parent node."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-table'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return True
def _has_data(self):
    """Return True if any child FRR container of this table holds data."""
    if not self.is_config():
        return False
    # Delegate to each child container; any populated child means data.
    if self.frr_load_sharings is not None and self.frr_load_sharings._has_data():
        return True
    if self.frr_remote_lfa_prefixes is not None and self.frr_remote_lfa_prefixes._has_data():
        return True
    if self.frr_tiebreakers is not None and self.frr_tiebreakers._has_data():
        return True
    if self.frr_use_cand_onlies is not None and self.frr_use_cand_onlies._has_data():
        return True
    if self.priority_limits is not None and self.priority_limits._has_data():
        return True
    return False
@staticmethod
def _meta_info():
    """Return the YDK meta information entry for the FrrTable container."""
    # Imported lazily to avoid a hard import cycle with the generated meta module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.FrrTable']['meta_info']
class SpfPrefixPriorities(object):
    """
    SPF prefix-priority configuration.

    Container holding the keyed list ``spf_prefix_priority`` which
    determines SPF priority for prefixes.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of SpfPrefixPriority entries, keyed by (level, priority type).
        self.spf_prefix_priority = YList()
        self.spf_prefix_priority.parent = self
        self.spf_prefix_priority.name = 'spf_prefix_priority'

    class SpfPrefixPriority(object):
        """
        Determine SPF priority for prefixes.

        Keyed by ``level`` (IsisInternalLevelEnum) and
        ``prefix_priority_type`` (IsisPrefixPriorityEnum); matching
        prefixes are selected either by ``access_list_name`` (str) or
        by ``admin_tag`` (1..4294967295).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None  # key: SPF level for prioritization
            self.prefix_priority_type = None  # key: priority to assign
            self.access_list_name = None  # ACL selecting prefixes
            self.admin_tag = None  # tag value selecting prefixes

        @property
        def _common_path(self):
            # Parent back-reference and both list keys are required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            if self.prefix_priority_type is None:
                raise YPYModelError('Key property prefix_priority_type is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:spf-prefix-priority[Cisco-IOS-XR-clns-isis-cfg:level = %s][Cisco-IOS-XR-clns-isis-cfg:prefix-priority-type = %s]' % (self.parent._common_path, self.level, self.prefix_priority_type)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Entry has data when any leaf is populated.
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.level, self.prefix_priority_type, self.access_list_name, self.admin_tag))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities.SpfPrefixPriority']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:spf-prefix-priorities' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Container has data when any child list entry has data.
        if not self.is_config():
            return False
        if self.spf_prefix_priority is None:
            return False
        return any(entry._has_data() for entry in self.spf_prefix_priority)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SpfPrefixPriorities']['meta_info']
class SummaryPrefixes(object):
    """
    Summary-prefix configuration.

    Container holding the keyed list ``summary_prefix`` of IP address
    prefixes to advertise.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of SummaryPrefix entries, keyed by address_prefix.
        self.summary_prefix = YList()
        self.summary_prefix.parent = self
        self.summary_prefix.name = 'summary_prefix'

    class SummaryPrefix(object):
        """
        Configure an IP address prefix to advertise.

        Keyed by ``address_prefix`` (IPv4 or IPv6 prefix string);
        optional ``level`` (1..2) selects where to summarize and
        ``tag`` (1..4294967295) sets the tag value.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.address_prefix = None  # key: IP summary address prefix
            self.level = None  # level in which to summarize routes
            self.tag = None  # tag value

        @property
        def _common_path(self):
            # Parent back-reference and the list key are required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.address_prefix is None:
                raise YPYModelError('Key property address_prefix is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:summary-prefix[Cisco-IOS-XR-clns-isis-cfg:address-prefix = %s]' % (self.parent._common_path, self.address_prefix)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Entry has data when any leaf is populated.
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.address_prefix, self.level, self.tag))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes.SummaryPrefix']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:summary-prefixes' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Container has data when any child list entry has data.
        if not self.is_config():
            return False
        if self.summary_prefix is None:
            return False
        return any(entry._has_data() for entry in self.summary_prefix)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SummaryPrefixes']['meta_info']
class MicroLoopAvoidance(object):
    """
    Micro-loop avoidance configuration.

    Leaves: ``enable`` (IsisMicroLoopAvoidanceEnum) and
    ``rib_update_delay`` (msec, 1000..65535).
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.enable = None  # micro-loop avoidance enable setting
        self.rib_update_delay = None  # RIB update delay in msec

    @property
    def _common_path(self):
        # The parent back-reference must be populated first.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:micro-loop-avoidance' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Node has data when any leaf is populated.
        if not self.is_config():
            return False
        return self.enable is not None or self.rib_update_delay is not None

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.MicroLoopAvoidance']['meta_info']
class Ucmp(object):
    """
    UCMP (UnEqual Cost MultiPath) configuration.

    Holds the SPF-to-UCMP computation delay (``delay_interval``,
    100..65535 msec), the feature-enable container (``enable``) and
    the interfaces excluded from UCMP path computation
    (``exclude_interfaces``).
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.delay_interval = None  # msec between primary SPF and UCMP computation
        self.enable = Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.Enable()
        self.enable.parent = self
        self.exclude_interfaces = Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces()
        self.exclude_interfaces.parent = self

    class Enable(object):
        """
        UCMP feature enable configuration: optional
        ``prefix_list_name`` (str) and ``variance`` (101..10000).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.prefix_list_name = None  # name of the prefix list
            self.variance = None  # variance value

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:enable' % (self.parent._common_path,)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Node has data when any leaf is populated.
            if not self.is_config():
                return False
            return self.prefix_list_name is not None or self.variance is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.Enable']['meta_info']

    class ExcludeInterfaces(object):
        """
        Interfaces excluded from UCMP path computation; a keyed list
        of ExcludeInterface entries.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # YANG list of ExcludeInterface entries, keyed by interface name.
            self.exclude_interface = YList()
            self.exclude_interface.parent = self
            self.exclude_interface.name = 'exclude_interface'

        class ExcludeInterface(object):
            """
            A single interface excluded from UCMP path computation,
            keyed by ``interface_name`` (str).
            """

            _prefix = 'clns-isis-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.interface_name = None  # key: interface to exclude

            @property
            def _common_path(self):
                # Parent back-reference and the list key are required.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                if self.interface_name is None:
                    raise YPYModelError('Key property interface_name is None')
                return '%s/Cisco-IOS-XR-clns-isis-cfg:exclude-interface[Cisco-IOS-XR-clns-isis-cfg:interface-name = %s]' % (self.parent._common_path, self.interface_name)

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                if not self.is_config():
                    return False
                return self.interface_name is not None

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces.ExcludeInterface']['meta_info']

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:exclude-interfaces' % (self.parent._common_path,)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Container has data when any child list entry has data.
            if not self.is_config():
                return False
            if self.exclude_interface is None:
                return False
            return any(entry._has_data() for entry in self.exclude_interface)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp.ExcludeInterfaces']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:ucmp' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Node has data when the leaf or any child container is populated.
        if not self.is_config():
            return False
        if self.delay_interval is not None:
            return True
        if self.enable is not None and self.enable._has_data():
            return True
        return self.exclude_interfaces is not None and self.exclude_interfaces._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Ucmp']['meta_info']
class MaxRedistPrefixes(object):
    """
    Maximum number of redistributed prefixes configuration.

    Container holding the keyed list ``max_redist_prefix`` capping
    how many redistributed prefixes may be included in the local
    system's LSP.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of MaxRedistPrefix entries, keyed by level.
        self.max_redist_prefix = YList()
        self.max_redist_prefix.parent = self
        self.max_redist_prefix.name = 'max_redist_prefix'

    class MaxRedistPrefix(object):
        """
        Upper limit on redistributed prefixes in the local LSP.

        Keyed by ``level`` (IsisInternalLevelEnum); carries the
        mandatory ``prefix_limit`` (1..28000).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None  # key: level the limit applies to
            self.prefix_limit = None  # mandatory: max number of prefixes

        @property
        def _common_path(self):
            # Parent back-reference and the list key are required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:max-redist-prefix[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Entry has data when any leaf is populated.
            if not self.is_config():
                return False
            return self.level is not None or self.prefix_limit is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes.MaxRedistPrefix']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:max-redist-prefixes' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Container has data when any child list entry has data.
        if not self.is_config():
            return False
        if self.max_redist_prefix is None:
            return False
        return any(entry._has_data() for entry in self.max_redist_prefix)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.MaxRedistPrefixes']['meta_info']
class Propagations(object):
    """
    Route propagation configuration.

    Container holding the keyed list ``propagation`` of rules that
    propagate routes between IS-IS levels.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of Propagation entries, keyed by (destination, source) level.
        self.propagation = YList()
        self.propagation.parent = self
        self.propagation.name = 'propagation'

    class Propagation(object):
        """
        Propagate routes between IS-IS levels.

        Keyed by ``destination_level`` and ``source_level``
        (IsisInternalLevelEnum, must differ); carries the mandatory
        ``route_policy_name`` (str) limiting the propagated routes.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.destination_level = None  # key: destination level for routes
            self.source_level = None  # key: source level for routes
            self.route_policy_name = None  # mandatory: limiting route policy

        @property
        def _common_path(self):
            # Parent back-reference and both list keys are required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.destination_level is None:
                raise YPYModelError('Key property destination_level is None')
            if self.source_level is None:
                raise YPYModelError('Key property source_level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:propagation[Cisco-IOS-XR-clns-isis-cfg:destination-level = %s][Cisco-IOS-XR-clns-isis-cfg:source-level = %s]' % (self.parent._common_path, self.destination_level, self.source_level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Entry has data when any leaf is populated.
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.destination_level, self.source_level, self.route_policy_name))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Propagations.Propagation']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:propagations' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Container has data when any child list entry has data.
        if not self.is_config():
            return False
        if self.propagation is None:
            return False
        return any(entry._has_data() for entry in self.propagation)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Propagations']['meta_info']
class Redistributions(object):
"""
Protocol redistribution configuration
.. attribute:: redistribution
Redistribution of other protocols into this IS\-IS instance
**type**\: list of :py:class:`Redistribution <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # YANG list of per-protocol Redistribution entries, keyed by protocol name.
    self.redistribution = YList()
    self.redistribution.parent = self
    self.redistribution.name = 'redistribution'
class Redistribution(object):
"""
Redistribution of other protocols into
this IS\-IS instance
.. attribute:: protocol_name <key>
The protocol to be redistributed. OSPFv3 may not be specified for an IPv4 topology and OSPF may not be specified for an IPv6 topology
**type**\: :py:class:`IsisRedistProtoEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisRedistProtoEnum>`
.. attribute:: bgp
bgp
**type**\: list of :py:class:`Bgp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Bgp>`
.. attribute:: connected_or_static_or_rip_or_subscriber_or_mobile
connected or static or rip or subscriber or mobile
**type**\: :py:class:`ConnectedOrStaticOrRipOrSubscriberOrMobile <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile>`
.. attribute:: eigrp
eigrp
**type**\: list of :py:class:`Eigrp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Eigrp>`
.. attribute:: ospf_or_ospfv3_or_isis_or_application
ospf or ospfv3 or isis or application
**type**\: list of :py:class:`OspfOrOspfv3OrIsisOrApplication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Key: the protocol being redistributed (IsisRedistProtoEnum).
    self.protocol_name = None
    # BGP instance list for redistribution.
    self.bgp = YList()
    self.bgp.parent = self
    self.bgp.name = 'bgp'
    # Presence container; remains None until explicitly configured.
    self.connected_or_static_or_rip_or_subscriber_or_mobile = None
    # EIGRP instance list for redistribution.
    self.eigrp = YList()
    self.eigrp.parent = self
    self.eigrp.name = 'eigrp'
    # OSPF/OSPFv3/IS-IS/application instance list for redistribution.
    self.ospf_or_ospfv3_or_isis_or_application = YList()
    self.ospf_or_ospfv3_or_isis_or_application.parent = self
    self.ospf_or_ospfv3_or_isis_or_application.name = 'ospf_or_ospfv3_or_isis_or_application'
class ConnectedOrStaticOrRipOrSubscriberOrMobile(object):
    """
    Redistribution settings for connected, static, RIP, subscriber or
    mobile routes.

    This is a presence container (``_is_presence`` is True), so its
    mere existence carries configuration meaning. Optional leaves:
    ``levels``, ``metric`` (0..16777215), ``metric_type``,
    ``ospf_route_type`` and ``route_policy_name``.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self._is_presence = True  # presence container: existence itself is data
        self.levels = None  # levels to redistribute routes into
        self.metric = None  # metric for redistributed routes
        self.metric_type = None  # IS-IS metric type
        self.ospf_route_type = None  # OSPF route types (OSPF protocol only)
        self.route_policy_name = None  # route policy controlling redistribution

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:connected-or-static-or-rip-or-subscriber-or-mobile' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        # A presence container reports data whenever it exists.
        if self._is_presence:
            return True
        return any(leaf is not None for leaf in (self.levels, self.metric, self.metric_type, self.ospf_route_type, self.route_policy_name))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.ConnectedOrStaticOrRipOrSubscriberOrMobile']['meta_info']
class OspfOrOspfv3OrIsisOrApplication(object):
    """
    Redistribution source keyed by protocol instance name
    (OSPF, OSPFv3, IS-IS or application).

    .. attribute:: instance_name <key>
        Protocol instance identifier (mandatory for ISIS, OSPF and
        application sources).
    .. attribute:: levels
        IS-IS levels to redistribute routes into.
    .. attribute:: metric
        Metric for redistributed routes: <0-63> narrow, <0-16777215> wide.
    .. attribute:: metric_type
        IS-IS metric type.
    .. attribute:: ospf_route_type
        OSPF route types to redistribute (OSPF sources only).
    .. attribute:: route_policy_name
        Route policy controlling redistribution.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.instance_name = None
        self.levels = self.metric = self.metric_type = None
        self.ospf_route_type = self.route_policy_name = None

    @property
    def _common_path(self):
        """XPath of this list entry, keyed by instance-name."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.instance_name is None:
            raise YPYModelError('Key property instance_name is None')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:ospf-or-ospfv3-or-isis-or-application[Cisco-IOS-XR-clns-isis-cfg:instance-name = {1}]'.format(
            self.parent._common_path, str(self.instance_name))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        """Return True if the key or any leaf has been set."""
        if not self.is_config():
            return False
        return any(leaf is not None for leaf in (
            self.instance_name, self.levels, self.metric,
            self.metric_type, self.ospf_route_type, self.route_policy_name))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.OspfOrOspfv3OrIsisOrApplication']['meta_info']
class Bgp(object):
    """
    BGP redistribution source, keyed by the two halves of the
    AS number in XX.YY format.

    .. attribute:: as_xx <key>
        First half of the BGP AS number (0..65535); must be non-zero
        if the second half is zero.
    .. attribute:: as_yy <key>
        Second half of the BGP AS number (0..4294967295); must be
        non-zero if the first half is zero.
    .. attribute:: levels
        IS-IS levels to redistribute routes into.
    .. attribute:: metric
        Metric for redistributed routes: <0-63> narrow, <0-16777215> wide.
    .. attribute:: metric_type
        IS-IS metric type.
    .. attribute:: ospf_route_type
        OSPF route types to redistribute (OSPF sources only).
    .. attribute:: route_policy_name
        Route policy controlling redistribution.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.as_xx = self.as_yy = None
        self.levels = self.metric = self.metric_type = None
        self.ospf_route_type = self.route_policy_name = None

    @property
    def _common_path(self):
        """XPath of this list entry, keyed by as-xx and as-yy."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.as_xx is None:
            raise YPYModelError('Key property as_xx is None')
        if self.as_yy is None:
            raise YPYModelError('Key property as_yy is None')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:bgp[Cisco-IOS-XR-clns-isis-cfg:as-xx = {1}][Cisco-IOS-XR-clns-isis-cfg:as-yy = {2}]'.format(
            self.parent._common_path, str(self.as_xx), str(self.as_yy))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        """Return True if either key or any leaf has been set."""
        if not self.is_config():
            return False
        return any(leaf is not None for leaf in (
            self.as_xx, self.as_yy, self.levels, self.metric,
            self.metric_type, self.ospf_route_type, self.route_policy_name))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Bgp']['meta_info']
class Eigrp(object):
    """
    EIGRP redistribution source, keyed by EIGRP AS number.

    .. attribute:: as_zz <key>
        EIGRP AS number (1..65535).
    .. attribute:: levels
        IS-IS levels to redistribute routes into.
    .. attribute:: metric
        Metric for redistributed routes: <0-63> narrow, <0-16777215> wide.
    .. attribute:: metric_type
        IS-IS metric type.
    .. attribute:: ospf_route_type
        OSPF route types to redistribute (OSPF sources only).
    .. attribute:: route_policy_name
        Route policy controlling redistribution.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.as_zz = None
        self.levels = self.metric = self.metric_type = None
        self.ospf_route_type = self.route_policy_name = None

    @property
    def _common_path(self):
        """XPath of this list entry, keyed by as-zz."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.as_zz is None:
            raise YPYModelError('Key property as_zz is None')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:eigrp[Cisco-IOS-XR-clns-isis-cfg:as-zz = {1}]'.format(
            self.parent._common_path, str(self.as_zz))

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        """Return True if the key or any leaf has been set."""
        if not self.is_config():
            return False
        return any(leaf is not None for leaf in (
            self.as_zz, self.levels, self.metric,
            self.metric_type, self.ospf_route_type, self.route_policy_name))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution.Eigrp']['meta_info']
@property
def _common_path(self):
    """XPath of this redistribution list entry, keyed by protocol-name."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.protocol_name is None:
        raise YPYModelError('Key property protocol_name is None')
    return '{0}/Cisco-IOS-XR-clns-isis-cfg:redistribution[Cisco-IOS-XR-clns-isis-cfg:protocol-name = {1}]'.format(
        self.parent._common_path, str(self.protocol_name))
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Redistribution entries are configuration data.
    return True
def _has_data(self):
if not self.is_config():
return False
if self.protocol_name is not None:
return True
if self.bgp is not None:
for child_ref in self.bgp:
if child_ref._has_data():
return True
if self.connected_or_static_or_rip_or_subscriber_or_mobile is not None and self.connected_or_static_or_rip_or_subscriber_or_mobile._has_data():
return True
if self.eigrp is not None:
for child_ref in self.eigrp:
if child_ref._has_data():
return True
if self.ospf_or_ospfv3_or_isis_or_application is not None:
for child_ref in self.ospf_or_ospfv3_or_isis_or_application:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
    """Return the YDK meta information entry for this list class."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    table = meta._meta_table
    return table['Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions.Redistribution']['meta_info']
@property
def _common_path(self):
    """XPath of the redistributions container under its parent."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '{0}/Cisco-IOS-XR-clns-isis-cfg:redistributions'.format(self.parent._common_path)
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # The redistributions container is configuration data.
    return True
def _has_data(self):
if not self.is_config():
return False
if self.redistribution is not None:
for child_ref in self.redistribution:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
    """Return the YDK meta information entry for this container."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    table = meta._meta_table
    return table['Isis.Instances.Instance.Afs.Af.TopologyName.Redistributions']['meta_info']
class SpfPeriodicIntervals(object):
    """
    Periodic SPF configuration: per-level maximum intervals
    between SPF runs.

    .. attribute:: spf_periodic_interval
        List of per-level maximum-interval entries.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'spf_periodic_interval'
        self.spf_periodic_interval = entries

    class SpfPeriodicInterval(object):
        """
        Maximum interval between SPF runs for one level.

        .. attribute:: level <key>
            Level to which this entry applies.
        .. attribute:: periodic_interval
            Maximum interval between SPF runs in seconds
            (0..3600, mandatory).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.periodic_interval = None

        @property
        def _common_path(self):
            """XPath of this list entry, keyed by level."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:spf-periodic-interval[Cisco-IOS-XR-clns-isis-cfg:level = {1}]'.format(
                self.parent._common_path, str(self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return self.level is not None or self.periodic_interval is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals.SpfPeriodicInterval']['meta_info']

    @property
    def _common_path(self):
        """XPath of the spf-periodic-intervals container."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:spf-periodic-intervals'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.spf_periodic_interval is None:
            return False
        return any(e._has_data() for e in self.spf_periodic_interval)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SpfPeriodicIntervals']['meta_info']
class SpfIntervals(object):
    """
    SPF-interval configuration: per-level route-calculation
    scheduling parameters.

    .. attribute:: spf_interval
        List of per-level scheduling entries.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'spf_interval'
        self.spf_interval = entries

    class SpfInterval(object):
        """
        Route-calculation scheduling parameters for one level.

        .. attribute:: level <key>
            Level to which this entry applies.
        .. attribute:: initial_wait
            Initial wait before an SPF run, in milliseconds (0..120000).
        .. attribute:: maximum_wait
            Maximum wait before an SPF run, in milliseconds (0..120000).
        .. attribute:: secondary_wait
            Secondary wait before an SPF run, in milliseconds (0..120000).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.initial_wait = self.maximum_wait = self.secondary_wait = None

        @property
        def _common_path(self):
            """XPath of this list entry, keyed by level."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:spf-interval[Cisco-IOS-XR-clns-isis-cfg:level = {1}]'.format(
                self.parent._common_path, str(self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (
                self.level, self.initial_wait,
                self.maximum_wait, self.secondary_wait))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals.SpfInterval']['meta_info']

    @property
    def _common_path(self):
        """XPath of the spf-intervals container."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:spf-intervals'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.spf_interval is None:
            return False
        return any(e._has_data() for e in self.spf_interval)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.SpfIntervals']['meta_info']
class MonitorConvergence(object):
    """
    Convergence-monitoring configuration.

    .. attribute:: enable
        Presence leaf: enable convergence monitoring.
    .. attribute:: prefix_list
        Prefix-list name enabling per-prefix monitoring.
    .. attribute:: track_ip_frr
        Presence leaf: enable tracking of IP-FRR convergence.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.enable = self.prefix_list = self.track_ip_frr = None

    @property
    def _common_path(self):
        """XPath of the monitor-convergence container."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:monitor-convergence'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        return any(leaf is not None for leaf in (
            self.enable, self.prefix_list, self.track_ip_frr))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.MonitorConvergence']['meta_info']
class DefaultInformation(object):
    """
    Default-route origination control, optionally policy-driven.
    Without a policy the default route is advertised with zero
    cost in level 2 only.

    .. attribute:: external
        Presence leaf: originate the default prefix as an external route.
    .. attribute:: policy_name
        Name of the controlling policy.
    .. attribute:: use_policy
        Whether origination is controlled by a policy.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.external = self.policy_name = self.use_policy = None

    @property
    def _common_path(self):
        """XPath of the default-information container."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:default-information'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        return any(leaf is not None for leaf in (
            self.external, self.policy_name, self.use_policy))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.DefaultInformation']['meta_info']
class AdminDistances(object):
    """
    Per-route administrative distance configuration.

    .. attribute:: admin_distance
        List of distance entries; each applies to all routes from the
        keyed source prefix, or only to those matching its prefix list.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'admin_distance'
        self.admin_distance = entries

    class AdminDistance(object):
        """
        One administrative-distance entry.

        .. attribute:: address_prefix <key>
            IPv4 or IPv6 route source prefix (CIDR string).
        .. attribute:: distance
            Administrative distance (1..255, mandatory).
        .. attribute:: prefix_list
            Prefix list restricting the routes this distance applies to.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.address_prefix = None
            self.distance = None
            self.prefix_list = None

        @property
        def _common_path(self):
            """XPath of this list entry, keyed by address-prefix."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.address_prefix is None:
                raise YPYModelError('Key property address_prefix is None')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:admin-distance[Cisco-IOS-XR-clns-isis-cfg:address-prefix = {1}]'.format(
                self.parent._common_path, str(self.address_prefix))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (
                self.address_prefix, self.distance, self.prefix_list))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances.AdminDistance']['meta_info']

    @property
    def _common_path(self):
        """XPath of the admin-distances container."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:admin-distances'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.admin_distance is None:
            return False
        return any(e._has_data() for e in self.admin_distance)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.AdminDistances']['meta_info']
class Ispf(object):
    """
    Incremental SPF (ISPF) configuration.

    .. attribute:: states
        Per-level ISPF enable/disable state container.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.states = Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States()
        self.states.parent = self

    class States(object):
        """
        ISPF state (enable/disable) container.

        .. attribute:: state
            List of per-level enable/disable entries.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            entries = YList()
            entries.parent = self
            entries.name = 'state'
            self.state = entries

        class State(object):
            """
            Enable/disable ISPF for one level.

            .. attribute:: level <key>
                Level to which this entry applies.
            .. attribute:: state
                ISPF state value (mandatory).
            """

            _prefix = 'clns-isis-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.level = None
                self.state = None

            @property
            def _common_path(self):
                """XPath of this list entry, keyed by level."""
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                if self.level is None:
                    raise YPYModelError('Key property level is None')
                return '{0}/Cisco-IOS-XR-clns-isis-cfg:state[Cisco-IOS-XR-clns-isis-cfg:level = {1}]'.format(
                    self.parent._common_path, str(self.level))

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                if not self.is_config():
                    return False
                return self.level is not None or self.state is not None

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States.State']['meta_info']

        @property
        def _common_path(self):
            """XPath of the states container."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:states'.format(self.parent._common_path)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            if self.state is None:
                return False
            return any(e._has_data() for e in self.state)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Ispf.States']['meta_info']

    @property
    def _common_path(self):
        """XPath of the ispf container."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:ispf'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        return self.states is not None and self.states._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Ispf']['meta_info']
class MplsLdpGlobal(object):
    """
    MPLS LDP configuration; applied only for the IPv4-unicast
    address family.

    .. attribute:: auto_config
        If True, LDP is enabled on all IS-IS interfaces enabled
        for this address family.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.auto_config = None

    @property
    def _common_path(self):
        """XPath of the mpls-ldp-global container."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:mpls-ldp-global'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        return self.auto_config is not None

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.MplsLdpGlobal']['meta_info']
class Mpls(object):
    """
    MPLS configuration; applied only for the IPv4-unicast
    address family.

    .. attribute:: igp_intact
        Presence leaf: install TE and non-TE nexthops in the RIB.
    .. attribute:: level
        IS-IS levels at which MPLS is enabled.
    .. attribute:: multicast_intact
        Presence leaf: install non-TE nexthops in the RIB for multicast.
    .. attribute:: router_id
        Traffic Engineering stable IP address for the system.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.igp_intact = self.level = self.multicast_intact = None
        self.router_id = Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId()
        self.router_id.parent = self

    class RouterId(object):
        """
        Traffic Engineering stable IP address for the system.
        Precisely one of address and interface_name must be specified.

        .. attribute:: address
            IPv4 address to use as the router ID.
        .. attribute:: interface_name
            Loopback interface whose stable IP address is the router ID.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.address = None
            self.interface_name = None

        @property
        def _common_path(self):
            """XPath of the router-id container."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:router-id'.format(self.parent._common_path)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return self.address is not None or self.interface_name is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Mpls.RouterId']['meta_info']

    @property
    def _common_path(self):
        """XPath of the mpls container."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:mpls'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if any(leaf is not None for leaf in (
                self.igp_intact, self.level, self.multicast_intact)):
            return True
        return self.router_id is not None and self.router_id._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Mpls']['meta_info']
class Metrics(object):
"""
Metric configuration
.. attribute:: metric
Metric configuration. Legal value depends on the metric\-style specified for the topology. If the metric\-style defined is narrow, then only a value between <1\-63> is allowed and if the metric\-style is defined as wide, then a value between <1\-16777215> is allowed as the metric value. All routers exclude links with the maximum wide metric (16777215) from their SPF
**type**\: list of :py:class:`Metric <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.metric = YList()
self.metric.parent = self
self.metric.name = 'metric'
class Metric(object):
"""
Metric configuration. Legal value depends on
the metric\-style specified for the topology. If
the metric\-style defined is narrow, then only a
value between <1\-63> is allowed and if the
metric\-style is defined as wide, then a value
between <1\-16777215> is allowed as the metric
value. All routers exclude links with the
maximum wide metric (16777215) from their SPF
.. attribute:: level <key>
Level to which configuration applies
**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
.. attribute:: metric
Allowed metric\: <1\-63> for narrow, <1\-16777215> for wide
**type**\: one of the below types:
**type**\: :py:class:`MetricEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum>`
**mandatory**\: True
----
**type**\: int
**range:** 1..16777215
**mandatory**\: True
----
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.level = None
self.metric = None
class MetricEnum(Enum):
"""
MetricEnum
Allowed metric\: <1\-63> for narrow,
<1\-16777215> for wide
.. data:: MAXIMUM = 16777215
Maximum wide metric. All routers will
exclude this link from their SPF
"""
MAXIMUM = 16777215
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric.MetricEnum']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.level is None:
raise YPYModelError('Key property level is None')
return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:metric[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.level is not None:
return True
if self.metric is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Metrics.Metric']['meta_info']
@property
def _common_path(self):
    # Container XPath: the parent's path plus this container's node name.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:metrics'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This binding models configuration (not operational state): always True.
    return True
def _has_data(self):
    """Return True when any child metric entry carries data."""
    if not self.is_config():
        return False
    return any(entry._has_data() for entry in (self.metric or ()))
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this class's
    # schema metadata entry from the generated meta table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Metrics']['meta_info']
class Weights(object):
    """
    Weight configuration.

    .. attribute:: weight
        Per-level weight entries for interface load balancing
        **type**: list of Weight
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: level) of weight entries.
        entries = YList()
        entries.parent = self
        entries.name = 'weight'
        self.weight = entries

    class Weight(object):
        """
        Weight configuration under interface for load balancing.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: weight
            Load-balancing weight; mandatory, range 1..16777214
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.weight = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the 'level' key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:weight[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Present when either the key or the weight leaf is set.
            if not self.is_config():
                return False
            return self.level is not None or self.weight is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Weights.Weight']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:weights'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child weight entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.weight or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName.Weights']['meta_info']
@property
def _common_path(self):
    # Absolute XPath of this topology list entry: parent path plus the
    # 'topology-name' key predicate.  Parent and key must both be set.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.topology_name is None:
        raise YPYModelError('Key property topology_name is None')
    return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:topology-name[Cisco-IOS-XR-clns-isis-cfg:topology-name = ' + str(self.topology_name) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This binding models configuration (not operational state): always True.
    return True
def _has_data(self):
    """Return True when this topology entry or any of its children holds data."""
    if not self.is_config():
        return False
    # Scalar leaves are present when set to any non-None value.
    leaves = (
        self.topology_name,
        self.adjacency_check,
        self.advertise_passive_only,
        self.apply_weight,
        self.attached_bit,
        self.default_admin_distance,
        self.ignore_attached_bit,
        self.maximum_paths,
        self.route_source_first_hop,
        self.single_topology,
        self.topology_id,
    )
    if any(leaf is not None for leaf in leaves):
        return True
    # Child containers count only when they themselves report data.
    children = (
        self.admin_distances,
        self.default_information,
        self.frr_table,
        self.ispf,
        self.max_redist_prefixes,
        self.metric_styles,
        self.metrics,
        self.micro_loop_avoidance,
        self.monitor_convergence,
        self.mpls,
        self.mpls_ldp_global,
        self.propagations,
        self.redistributions,
        self.segment_routing,
        self.spf_intervals,
        self.spf_periodic_intervals,
        self.spf_prefix_priorities,
        self.summary_prefixes,
        self.ucmp,
        self.weights,
    )
    return any(child is not None and child._has_data() for child in children)
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this class's
    # schema metadata entry from the generated meta table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Afs.Af.TopologyName']['meta_info']
@property
def _common_path(self):
    # Absolute XPath of this AF list entry: parent path plus the two key
    # predicates 'af-name' and 'saf-name'.  Parent and both keys required.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.af_name is None:
        raise YPYModelError('Key property af_name is None')
    if self.saf_name is None:
        raise YPYModelError('Key property saf_name is None')
    return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:af[Cisco-IOS-XR-clns-isis-cfg:af-name = ' + str(self.af_name) + '][Cisco-IOS-XR-clns-isis-cfg:saf-name = ' + str(self.saf_name) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This binding models configuration (not operational state): always True.
    return True
def _has_data(self):
    """Return True when this AF entry, its data container, or any child topology holds data."""
    if not self.is_config():
        return False
    if self.af_name is not None or self.saf_name is not None:
        return True
    if self.af_data is not None and self.af_data._has_data():
        return True
    return any(topo._has_data() for topo in (self.topology_name or ()))
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this class's
    # schema metadata entry from the generated meta table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Afs.Af']['meta_info']
@property
def _common_path(self):
    # Container XPath: the parent's path plus this container's node name.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:afs'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This binding models configuration (not operational state): always True.
    return True
def _has_data(self):
    """Return True when any child AF entry carries data."""
    if not self.is_config():
        return False
    return any(af._has_data() for af in (self.af or ()))
@staticmethod
def _meta_info():
    # Deferred import of the generated meta module; returns this class's
    # schema metadata entry from the generated meta table.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Afs']['meta_info']
class LspRefreshIntervals(object):
    """
    LSP refresh-interval configuration.

    .. attribute:: lsp_refresh_interval
        Interval between re-flooding of unchanged LSPs
        **type**: list of LspRefreshInterval
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: level) of refresh-interval entries.
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_refresh_interval'
        self.lsp_refresh_interval = entries

    class LspRefreshInterval(object):
        """
        Interval between re-flooding of unchanged LSPs.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: interval
            Seconds; mandatory, range 1..65535
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.interval = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the 'level' key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-refresh-interval[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Present when either the key or the interval leaf is set.
            if not self.is_config():
                return False
            return self.level is not None or self.interval is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LspRefreshIntervals.LspRefreshInterval']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-refresh-intervals'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child refresh-interval entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.lsp_refresh_interval or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LspRefreshIntervals']['meta_info']
class Distribute(object):
    """
    IS-IS Distribute BGP-LS configuration.

    This is a YANG *presence* container: its mere existence is data, so
    ``_has_data`` is True whenever the instance exists.

    .. attribute:: dist_inst_id
        Instance ID; range 1..65535
    .. attribute:: dist_throttle
        Seconds; range 5..20
    .. attribute:: level
        Level (IsisConfigurableLevelsEnum)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Presence container: existing at all counts as data.
        self._is_presence = True
        self.dist_inst_id = None
        self.dist_throttle = None
        self.level = None

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:distribute'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self._is_presence:
            return True
        return any(v is not None for v in (self.dist_inst_id, self.dist_throttle, self.level))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Distribute']['meta_info']
class LspAcceptPasswords(object):
    """
    LSP/SNP accept password configuration.

    .. attribute:: lsp_accept_password
        LSP/SNP accept passwords; each requires an LSPPassword of the
        same level to exist
        **type**: list of LspAcceptPassword
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: level) of accept-password entries.
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_accept_password'
        self.lsp_accept_password = entries

    class LspAcceptPassword(object):
        """
        LSP/SNP accept password; requires an LSPPassword of the same level.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: password
            Password; mandatory, pattern (!.+)|([^!].+)
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.password = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the 'level' key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-accept-password[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Present when either the key or the password leaf is set.
            if not self.is_config():
                return False
            return self.level is not None or self.password is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LspAcceptPasswords.LspAcceptPassword']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-accept-passwords'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child accept-password entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.lsp_accept_password or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LspAcceptPasswords']['meta_info']
class LspMtus(object):
    """
    LSP MTU configuration.

    .. attribute:: lsp_mtu
        LSP MTU entries
        **type**: list of LspMtu
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: level) of LSP MTU entries.
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_mtu'
        self.lsp_mtu = entries

    class LspMtu(object):
        """
        LSP MTU.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: mtu
            Bytes; mandatory, range 128..4352
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.mtu = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the 'level' key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-mtu[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Present when either the key or the mtu leaf is set.
            if not self.is_config():
                return False
            return self.level is not None or self.mtu is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LspMtus.LspMtu']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-mtus'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child MTU entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.lsp_mtu or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LspMtus']['meta_info']
class Nsf(object):
    """
    IS-IS NSF configuration.

    .. attribute:: flavor
        NSF flavor; NSF not configured if item is deleted (IsisNsfFlavorEnum)
    .. attribute:: interface_timer
        Per-interface wait for a restart ACK during an IETF-NSF restart;
        no effect unless IETF-NSF is configured; range 1..20
    .. attribute:: lifetime
        Maximum route lifetime following restart, after which old routes
        are purged from the RIB; range 5..300
    .. attribute:: max_interface_timer_expiry
        Maximum interface-timer expiries before an IETF-NSF restart is
        aborted; no effect unless IETF-NSF is configured; range 1..10
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.flavor = None
        self.interface_timer = None
        self.lifetime = None
        self.max_interface_timer_expiry = None

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:nsf'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        leaves = (
            self.flavor,
            self.interface_timer,
            self.lifetime,
            self.max_interface_timer_expiry,
        )
        return any(v is not None for v in leaves)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Nsf']['meta_info']
class LinkGroups(object):
    """
    Link Group.

    .. attribute:: link_group
        Configuration for link group names
        **type**: list of LinkGroup
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: link_group_name) of link-group entries.
        entries = YList()
        entries.parent = self
        entries.name = 'link_group'
        self.link_group = entries

    class LinkGroup(object):
        """
        Configuration for a link group name.

        .. attribute:: link_group_name <key>
            Link Group Name; string, length 0..40
        .. attribute:: enable
            Presence leaf (Empty): must be the first object created and
            the last deleted for the link group
        .. attribute:: metric_offset
            Metric for redistributed routes: 0..63 narrow, 0..16777215 wide
        .. attribute:: minimum_members
            Minimum members; range 2..64
        .. attribute:: revert_members
            Revert members; range 2..64
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.link_group_name = None
            self.enable = None
            self.metric_offset = None
            self.minimum_members = None
            self.revert_members = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the name key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.link_group_name is None:
                raise YPYModelError('Key property link_group_name is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:link-group[Cisco-IOS-XR-clns-isis-cfg:link-group-name = ' + str(self.link_group_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (
                self.link_group_name,
                self.enable,
                self.metric_offset,
                self.minimum_members,
                self.revert_members,
            )
            return any(v is not None for v in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LinkGroups.LinkGroup']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:link-groups'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child link-group entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.link_group or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LinkGroups']['meta_info']
class LspCheckIntervals(object):
    """
    LSP checksum check interval configuration.

    .. attribute:: lsp_check_interval
        LSP checksum check interval parameters
        **type**: list of LspCheckInterval
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: level) of checksum-check-interval entries.
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_check_interval'
        self.lsp_check_interval = entries

    class LspCheckInterval(object):
        """
        LSP checksum check interval parameters.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: interval
            Checksum check interval in seconds; mandatory, range 10..65535
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.interval = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the 'level' key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-check-interval[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Present when either the key or the interval leaf is set.
            if not self.is_config():
                return False
            return self.level is not None or self.interval is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LspCheckIntervals.LspCheckInterval']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-check-intervals'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child check-interval entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.lsp_check_interval or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LspCheckIntervals']['meta_info']
class LspPasswords(object):
    """
    LSP/SNP password configuration.

    .. attribute:: lsp_password
        LSP/SNP passwords; must exist if an LSPAcceptPassword of the
        same level exists
        **type**: list of LspPassword
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: level) of password entries.
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_password'
        self.lsp_password = entries

    class LspPassword(object):
        """
        LSP/SNP password; must exist if an LSPAcceptPassword of the same
        level exists.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: algorithm
            Algorithm (IsisAuthenticationAlgorithmEnum); mandatory
        .. attribute:: authentication_type
            SNP packet authentication mode (IsisSnpAuthEnum); mandatory
        .. attribute:: failure_mode
            Failure mode (IsisAuthenticationFailureModeEnum); mandatory
        .. attribute:: password
            Password or unencrypted Key Chain name; mandatory,
            pattern (!.+)|([^!].+)
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.algorithm = None
            self.authentication_type = None
            self.failure_mode = None
            self.password = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the 'level' key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-password[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (
                self.level,
                self.algorithm,
                self.authentication_type,
                self.failure_mode,
                self.password,
            )
            return any(v is not None for v in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LspPasswords.LspPassword']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-passwords'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child password entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.lsp_password or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LspPasswords']['meta_info']
class Nets(object):
    """
    NET configuration.

    .. attribute:: net
        Network Entity Title (NET) entries
        **type**: list of Net
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: net_name) of NET entries.
        entries = YList()
        entries.parent = self
        entries.name = 'net'
        self.net = entries

    class Net(object):
        """
        Network Entity Title (NET).

        .. attribute:: net_name <key>
            Network Entity Title; string matching
            [a-fA-F0-9]{2}(\\.[a-fA-F0-9]{4}){3,9}\\.[a-fA-F0-9]{2}
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.net_name = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the 'net-name' key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.net_name is None:
                raise YPYModelError('Key property net_name is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:net[Cisco-IOS-XR-clns-isis-cfg:net-name = ' + str(self.net_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Present when the key leaf is set.
            if not self.is_config():
                return False
            return self.net_name is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Nets.Net']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:nets'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child NET entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.net or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Nets']['meta_info']
class LspLifetimes(object):
    """
    LSP lifetime configuration.

    .. attribute:: lsp_lifetime
        Maximum LSP lifetime entries
        **type**: list of LspLifetime
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed YANG list (key: level) of lifetime entries.
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_lifetime'
        self.lsp_lifetime = entries

    class LspLifetime(object):
        """
        Maximum LSP lifetime.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: lifetime
            Seconds; mandatory, range 1..65535
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.lifetime = None

        @property
        def _common_path(self):
            # XPath of this entry: parent path plus the 'level' key predicate.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            parent_path = self.parent._common_path
            return parent_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-lifetime[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Present when either the key or the lifetime leaf is set.
            if not self.is_config():
                return False
            return self.level is not None or self.lifetime is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.LspLifetimes.LspLifetime']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-clns-isis-cfg:lsp-lifetimes'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any child lifetime entry carries data.
        if not self.is_config():
            return False
        return any(entry._has_data() for entry in (self.lsp_lifetime or ()))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.LspLifetimes']['meta_info']
class OverloadBits(object):
    """
    LSP overload-bit configuration container.

    .. attribute:: overload_bit
        Per-level overload-bit settings (list of OverloadBit)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # The list is wired back to this container for path derivation.
        entries = YList()
        entries.parent = self
        entries.name = 'overload_bit'
        self.overload_bit = entries

    class OverloadBit(object):
        """
        Controls when the overload bit is set in the system LSP so that
        other routers avoid this one in SPF calculations: unconditionally,
        or on startup until a timer expires or BGP converges.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: external_adv_type
            Advertise prefixes from other protocols (IsisAdvTypeExternalEnum)
        .. attribute:: hippity_period
            Seconds to advertise overloaded after startup, range 5..86400
        .. attribute:: inter_level_adv_type
            Advertise prefixes across ISIS levels (IsisAdvTypeInterLevelEnum)
        .. attribute:: overload_bit_mode
            Circumstances under which the bit is set (IsisOverloadBitModeEnum)
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.external_adv_type = None
            self.hippity_period = None
            self.inter_level_adv_type = None
            self.overload_bit_mode = None

        @property
        def _common_path(self):
            # Parent container and the 'level' key are both required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:overload-bit[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.level, self.external_adv_type, self.hippity_period,
                      self.inter_level_adv_type, self.overload_bit_mode)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.OverloadBits.OverloadBit']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:overload-bits' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # The container has data when any of its list entries does.
        if not self.is_config():
            return False
        if self.overload_bit is None:
            return False
        return any(entry._has_data() for entry in self.overload_bit)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.OverloadBits']['meta_info']
class Interfaces(object):
"""
Per\-interface configuration
.. attribute:: interface
Configuration for an IS\-IS interface
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Initialize the per-interface configuration container."""
    self.parent = None
    # The interface list is wired back to this container so child
    # entries can derive their schema paths.
    iface_list = YList()
    iface_list.parent = self
    iface_list.name = 'interface'
    self.interface = iface_list
class Interface(object):
"""
Configuration for an IS\-IS interface
.. attribute:: interface_name <key>
Interface name
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: bfd
BFD configuration
**type**\: :py:class:`Bfd <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.Bfd>`
.. attribute:: circuit_type
Configure circuit type for interface
**type**\: :py:class:`IsisConfigurableLevelsEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisConfigurableLevelsEnum>`
.. attribute:: csnp_intervals
CSNP\-interval configuration
**type**\: :py:class:`CsnpIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals>`
.. attribute:: hello_accept_passwords
IIH accept password configuration
**type**\: :py:class:`HelloAcceptPasswords <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords>`
.. attribute:: hello_intervals
Hello\-interval configuration
**type**\: :py:class:`HelloIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.HelloIntervals>`
.. attribute:: hello_multipliers
Hello\-multiplier configuration
**type**\: :py:class:`HelloMultipliers <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers>`
.. attribute:: hello_paddings
Hello\-padding configuration
**type**\: :py:class:`HelloPaddings <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.HelloPaddings>`
.. attribute:: hello_passwords
IIH password configuration
**type**\: :py:class:`HelloPasswords <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.HelloPasswords>`
.. attribute:: interface_afs
Per\-interface address\-family configuration
**type**\: :py:class:`InterfaceAfs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs>`
.. attribute:: link_down_fast_detect
Configure high priority detection of interface down event
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: lsp_fast_flood_thresholds
LSP fast flood threshold configuration
**type**\: :py:class:`LspFastFloodThresholds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds>`
.. attribute:: lsp_intervals
LSP\-interval configuration
**type**\: :py:class:`LspIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.LspIntervals>`
.. attribute:: lsp_retransmit_intervals
LSP\-retransmission\-interval configuration
**type**\: :py:class:`LspRetransmitIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals>`
.. attribute:: lsp_retransmit_throttle_intervals
LSP\-retransmission\-throttle\-interval configuration
**type**\: :py:class:`LspRetransmitThrottleIntervals <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals>`
.. attribute:: mesh_group
Mesh\-group configuration
**type**\: one of the below types:
**type**\: :py:class:`MeshGroupEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.MeshGroupEnum>`
----
**type**\: int
**range:** 0..4294967295
----
.. attribute:: point_to_point
IS\-IS will attempt to form point\-to\-point over LAN adjacencies over this interface
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: priorities
DIS\-election priority configuration
**type**\: :py:class:`Priorities <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.Priorities>`
.. attribute:: running
This object must be set before any other configuration is supplied for an interface, and must be the last per\-interface configuration object to be removed
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: state
Enable/Disable routing
**type**\: :py:class:`IsisInterfaceStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisInterfaceStateEnum>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Initialize an IS-IS interface entry with all child containers attached."""
    self.parent = None
    self.interface_name = None

    def _adopt(child):
        # Wire a child container back to this interface and return it.
        child.parent = self
        return child

    _cls = Isis.Instances.Instance.Interfaces.Interface
    self.bfd = _adopt(_cls.Bfd())
    self.circuit_type = None
    self.csnp_intervals = _adopt(_cls.CsnpIntervals())
    self.hello_accept_passwords = _adopt(_cls.HelloAcceptPasswords())
    self.hello_intervals = _adopt(_cls.HelloIntervals())
    self.hello_multipliers = _adopt(_cls.HelloMultipliers())
    self.hello_paddings = _adopt(_cls.HelloPaddings())
    self.hello_passwords = _adopt(_cls.HelloPasswords())
    self.interface_afs = _adopt(_cls.InterfaceAfs())
    self.link_down_fast_detect = None
    self.lsp_fast_flood_thresholds = _adopt(_cls.LspFastFloodThresholds())
    self.lsp_intervals = _adopt(_cls.LspIntervals())
    self.lsp_retransmit_intervals = _adopt(_cls.LspRetransmitIntervals())
    self.lsp_retransmit_throttle_intervals = _adopt(_cls.LspRetransmitThrottleIntervals())
    self.mesh_group = None
    self.point_to_point = None
    self.priorities = _adopt(_cls.Priorities())
    self.running = None
    self.state = None
class MeshGroupEnum(Enum):
    """
    Mesh-group configuration mode.

    .. data:: BLOCKED = 0
        Blocked mesh group: changed LSPs are not flooded over blocked
        interfaces.
    """

    BLOCKED = 0

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular dependency on the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.MeshGroupEnum']
class LspRetransmitThrottleIntervals(object):
    """
    LSP-retransmission-throttle-interval configuration container.

    .. attribute:: lsp_retransmit_throttle_interval
        Minimum interval between retransmissions of different LSPs
        (list of LspRetransmitThrottleInterval)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_retransmit_throttle_interval'
        self.lsp_retransmit_throttle_interval = entries

    class LspRetransmitThrottleInterval(object):
        """
        Minimum interval between retransmissions of different LSPs.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: interval
            Milliseconds, range 0..65535, mandatory
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.interval = None

        @property
        def _common_path(self):
            # Parent container and the 'level' key are both required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:lsp-retransmit-throttle-interval[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.level, self.interval))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals.LspRetransmitThrottleInterval']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:lsp-retransmit-throttle-intervals' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.lsp_retransmit_throttle_interval is None:
            return False
        return any(entry._has_data() for entry in self.lsp_retransmit_throttle_interval)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.LspRetransmitThrottleIntervals']['meta_info']
class LspRetransmitIntervals(object):
    """
    LSP-retransmission-interval configuration container.

    .. attribute:: lsp_retransmit_interval
        Interval between retransmissions of the same LSP
        (list of LspRetransmitInterval)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_retransmit_interval'
        self.lsp_retransmit_interval = entries

    class LspRetransmitInterval(object):
        """
        Interval between retransmissions of the same LSP.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: interval
            Seconds, range 0..65535, mandatory
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.interval = None

        @property
        def _common_path(self):
            # Parent container and the 'level' key are both required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:lsp-retransmit-interval[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.level, self.interval))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals.LspRetransmitInterval']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:lsp-retransmit-intervals' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.lsp_retransmit_interval is None:
            return False
        return any(entry._has_data() for entry in self.lsp_retransmit_interval)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.LspRetransmitIntervals']['meta_info']
class Bfd(object):
    """
    BFD configuration for an IS-IS interface.

    .. attribute:: detection_multiplier
        Detection multiplier for BFD sessions created by isis, range 2..50
    .. attribute:: enable_ipv4
        True to enable BFD; False to disable and block parent inheritance
    .. attribute:: enable_ipv6
        True to enable BFD; False to disable and block parent inheritance
    .. attribute:: interval
        Hello interval for BFD sessions created by isis, range 3..30000
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # All leaves start unset; callers assign them explicitly.
        self.parent = None
        self.detection_multiplier = None
        self.enable_ipv4 = None
        self.enable_ipv6 = None
        self.interval = None

    @property
    def _common_path(self):
        # A container path is just the parent path plus this node's name.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:bfd' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        leaves = (self.detection_multiplier, self.enable_ipv4,
                  self.enable_ipv6, self.interval)
        return any(leaf is not None for leaf in leaves)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.Bfd']['meta_info']
class Priorities(object):
    """
    DIS-election priority configuration container.

    .. attribute:: priority
        Per-level DIS-election priorities (list of Priority)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'priority'
        self.priority = entries

    class Priority(object):
        """
        DIS-election priority for one ISIS level.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: priority_value
            Priority, range 0..127, mandatory
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.priority_value = None

        @property
        def _common_path(self):
            # Parent container and the 'level' key are both required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:priority[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.level, self.priority_value))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.Priorities.Priority']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:priorities' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.priority is None:
            return False
        return any(entry._has_data() for entry in self.priority)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.Priorities']['meta_info']
class HelloAcceptPasswords(object):
    """
    IIH accept-password configuration container.

    .. attribute:: hello_accept_password
        IIH accept passwords; requires a HelloPassword of the same level
        (list of HelloAcceptPassword)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'hello_accept_password'
        self.hello_accept_password = entries

    class HelloAcceptPassword(object):
        """
        IIH accept password for one ISIS level. Requires the existence of
        a HelloPassword of the same level.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: password
            Password string, mandatory
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.password = None

        @property
        def _common_path(self):
            # Parent container and the 'level' key are both required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-accept-password[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.level, self.password))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords.HelloAcceptPassword']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-accept-passwords' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.hello_accept_password is None:
            return False
        return any(entry._has_data() for entry in self.hello_accept_password)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloAcceptPasswords']['meta_info']
class HelloPasswords(object):
    """
    IIH password configuration container.

    .. attribute:: hello_password
        IIH passwords; must exist if a HelloAcceptPassword of the same
        level exists (list of HelloPassword)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'hello_password'
        self.hello_password = entries

    class HelloPassword(object):
        """
        IIH password for one ISIS level. Must exist if a
        HelloAcceptPassword of the same level exists.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: algorithm
            Authentication algorithm (IsisAuthenticationAlgorithmEnum), mandatory
        .. attribute:: failure_mode
            Failure mode (IsisAuthenticationFailureModeEnum), mandatory
        .. attribute:: password
            Password or unencrypted key-chain name, mandatory
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.algorithm = None
            self.failure_mode = None
            self.password = None

        @property
        def _common_path(self):
            # Parent container and the 'level' key are both required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-password[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            leaves = (self.level, self.algorithm, self.failure_mode, self.password)
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPasswords.HelloPassword']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-passwords' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.hello_password is None:
            return False
        return any(entry._has_data() for entry in self.hello_password)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPasswords']['meta_info']
class HelloPaddings(object):
    """
    Hello-padding configuration container.

    .. attribute:: hello_padding
        Pad IIHs to the interface MTU (list of HelloPadding)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'hello_padding'
        self.hello_padding = entries

    class HelloPadding(object):
        """
        Pad IIHs to the interface MTU for one ISIS level.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: padding_type
            Hello padding type value (IsisHelloPaddingEnum), mandatory
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.padding_type = None

        @property
        def _common_path(self):
            # Parent container and the 'level' key are both required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-padding[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.level, self.padding_type))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPaddings.HelloPadding']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-paddings' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.hello_padding is None:
            return False
        return any(entry._has_data() for entry in self.hello_padding)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloPaddings']['meta_info']
class HelloMultipliers(object):
    """
    Hello-multiplier configuration container.

    .. attribute:: hello_multiplier
        Number of successive IIHs that may be missed on an adjacency
        before it is considered down (list of HelloMultiplier)
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'hello_multiplier'
        self.hello_multiplier = entries

    class HelloMultiplier(object):
        """
        Hello-multiplier for one ISIS level: the number of successive IIHs
        that may be missed on an adjacency before it is considered down.

        .. attribute:: level <key>
            Level to which configuration applies (IsisInternalLevelEnum)
        .. attribute:: multiplier
            Hello multiplier value, range 3..1000, mandatory
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.multiplier = None

        @property
        def _common_path(self):
            # Parent container and the 'level' key are both required.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-multiplier[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return any(leaf is not None for leaf in (self.level, self.multiplier))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers.HelloMultiplier']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-multipliers' % (self.parent._common_path,)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.hello_multiplier is None:
            return False
        return any(entry._has_data() for entry in self.hello_multiplier)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloMultipliers']['meta_info']
class LspFastFloodThresholds(object):
    """
    LSP fast flood threshold configuration.

    .. attribute:: lsp_fast_flood_threshold

        Per-level list of thresholds: the number of LSPs to send
        back to back on an interface.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'lsp_fast_flood_threshold'
        self.lsp_fast_flood_threshold = entries

    class LspFastFloodThreshold(object):
        """
        Number of LSPs to send back to back on an interface.

        .. attribute:: level <key>

            Level to which configuration applies (IsisInternalLevelEnum).

        .. attribute:: count

            Number of LSPs (int, range 1..4294967295). Mandatory.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.count = None

        @property
        def _common_path(self):
            """XPath of this list entry; needs parent and the level key set."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:lsp-fast-flood-threshold[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            """Return True: this node represents configuration data."""
            return True

        def _has_data(self):
            """Return True if any leaf of this entry is set."""
            if not self.is_config():
                return False
            return self.level is not None or self.count is not None

        @staticmethod
        def _meta_info():
            """Return the generated YDK meta information for this class."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds.LspFastFloodThreshold']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:lsp-fast-flood-thresholds' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if any child list entry carries data."""
        if not self.is_config():
            return False
        children = self.lsp_fast_flood_threshold
        if children is None:
            return False
        return any(child._has_data() for child in children)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.LspFastFloodThresholds']['meta_info']
class HelloIntervals(object):
    """
    Hello-interval configuration.

    .. attribute:: hello_interval

        Per-level list of intervals at which IIH packets are sent.
        Sending is three times quicker on a LAN interface that has
        been elected DIS.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'hello_interval'
        self.hello_interval = entries

    class HelloInterval(object):
        """
        Interval at which IIH packets will be sent for one level.

        .. attribute:: level <key>

            Level to which configuration applies (IsisInternalLevelEnum).

        .. attribute:: interval

            Seconds (int, range 1..65535). Mandatory.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.interval = None

        @property
        def _common_path(self):
            """XPath of this list entry; needs parent and the level key set."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-interval[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            """Return True: this node represents configuration data."""
            return True

        def _has_data(self):
            """Return True if any leaf of this entry is set."""
            if not self.is_config():
                return False
            return self.level is not None or self.interval is not None

        @staticmethod
        def _meta_info():
            """Return the generated YDK meta information for this class."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloIntervals.HelloInterval']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:hello-intervals' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if any child list entry carries data."""
        if not self.is_config():
            return False
        children = self.hello_interval
        if children is None:
            return False
        return any(child._has_data() for child in children)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.HelloIntervals']['meta_info']
class InterfaceAfs(object):
"""
Per\-interface address\-family configuration
.. attribute:: interface_af
Configuration for an IS\-IS address\-family on a single interface. If a named (non\-default) topology is being created it must be multicast. Also the topology ID mustbe set first and delete last in the router configuration
**type**\: list of :py:class:`InterfaceAf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Initialize the per-interface address-family list container."""
    self.parent = None
    entries = YList()
    entries.parent = self
    entries.name = 'interface_af'
    self.interface_af = entries
class InterfaceAf(object):
"""
Configuration for an IS\-IS address\-family
on a single interface. If a named
(non\-default) topology is being created it
must be multicast. Also the topology ID
mustbe set first and delete last in the
router configuration.
.. attribute:: af_name <key>
Address family
**type**\: :py:class:`IsisAddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisAddressFamilyEnum>`
.. attribute:: saf_name <key>
Sub address family
**type**\: :py:class:`IsisSubAddressFamilyEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisSubAddressFamilyEnum>`
.. attribute:: interface_af_data
Data container
**type**\: :py:class:`InterfaceAfData <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData>`
.. attribute:: topology_name
keys\: topology\-name
**type**\: list of :py:class:`TopologyName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Initialize keys, the data container, and the topology-name list."""
    self.parent = None
    self.af_name = None
    self.saf_name = None
    data = Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData()
    data.parent = self
    self.interface_af_data = data
    topologies = YList()
    topologies.parent = self
    topologies.name = 'topology_name'
    self.topology_name = topologies
class InterfaceAfData(object):
"""
Data container.
.. attribute:: admin_tags
admin\-tag configuration
**type**\: :py:class:`AdminTags <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags>`
.. attribute:: auto_metrics
AutoMetric configuration
**type**\: :py:class:`AutoMetrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics>`
.. attribute:: interface_af_state
Interface state
**type**\: :py:class:`IsisInterfaceAfStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisInterfaceAfStateEnum>`
.. attribute:: interface_frr_table
Fast\-ReRoute configuration
**type**\: :py:class:`InterfaceFrrTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable>`
.. attribute:: interface_link_group
Provide link group name and level
**type**\: :py:class:`InterfaceLinkGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceLinkGroup>`
.. attribute:: metrics
Metric configuration
**type**\: :py:class:`Metrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics>`
.. attribute:: mpls_ldp
MPLS LDP configuration
**type**\: :py:class:`MplsLdp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.MplsLdp>`
.. attribute:: prefix_sid
Assign prefix SID to an interface, ISISPHPFlag will be rejected if set to disable, ISISEXPLICITNULLFlag will override the value of ISISPHPFlag
**type**\: :py:class:`PrefixSid <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.PrefixSid>`
.. attribute:: running
The presence of this object allows an address\-family to be run over the interface in question.This must be the first object created under the InterfaceAddressFamily container, and the last one deleted
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: weights
Weight configuration
**type**\: :py:class:`Weights <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Initialize all child containers and scalar leaves of the AF data node."""
    self.parent = None
    # Local alias for the (deeply nested) generated class that owns the children.
    container = Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData
    self.admin_tags = container.AdminTags()
    self.admin_tags.parent = self
    self.auto_metrics = container.AutoMetrics()
    self.auto_metrics.parent = self
    self.interface_af_state = None
    self.interface_frr_table = container.InterfaceFrrTable()
    self.interface_frr_table.parent = self
    self.interface_link_group = None
    self.metrics = container.Metrics()
    self.metrics.parent = self
    self.mpls_ldp = container.MplsLdp()
    self.mpls_ldp.parent = self
    self.prefix_sid = None
    self.running = None
    self.weights = container.Weights()
    self.weights.parent = self
class PrefixSid(object):
    """
    Assign prefix SID to an interface. ISISPHPFlag will be rejected
    if set to disable; ISISEXPLICITNULLFlag overrides the value of
    ISISPHPFlag.

    This is a presence container: its mere existence carries meaning.

    .. attribute:: explicit_null

        Enable/Disable Explicit-NULL flag (IsisexplicitNullFlagEnum). Mandatory.

    .. attribute:: nflag_clear

        Clear N-flag for the prefix-SID (NflagClearEnum). Mandatory.

    .. attribute:: php

        Enable/Disable Penultimate Hop Popping (IsisphpFlagEnum). Mandatory.

    .. attribute:: type

        SID type for the interface (IsissidEnum). Mandatory.

    .. attribute:: value

        SID value for the interface (int, range 0..1048575). Mandatory.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Presence flag: the container reports data even with no leaves set.
        self._is_presence = True
        self.explicit_null = None
        self.nflag_clear = None
        self.php = None
        self.type = None
        self.value = None

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:prefix-sid' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if present (presence container) or any leaf is set."""
        if not self.is_config():
            return False
        if self._is_presence:
            return True
        leaves = (self.explicit_null, self.nflag_clear, self.php, self.type, self.value)
        return any(leaf is not None for leaf in leaves)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.PrefixSid']['meta_info']
class InterfaceFrrTable(object):
"""
Fast\-ReRoute configuration
.. attribute:: frr_exclude_interfaces
FRR exclusion configuration
**type**\: :py:class:`FrrExcludeInterfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces>`
.. attribute:: frr_remote_lfa_max_metrics
Remote LFA maxmimum metric
**type**\: :py:class:`FrrRemoteLfaMaxMetrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics>`
.. attribute:: frr_remote_lfa_types
Remote LFA Enable
**type**\: :py:class:`FrrRemoteLfaTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes>`
.. attribute:: frr_types
Type of FRR computation per level
**type**\: :py:class:`FrrTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes>`
.. attribute:: frrlfa_candidate_interfaces
FRR LFA candidate configuration
**type**\: :py:class:`FrrlfaCandidateInterfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces>`
.. attribute:: frrtilfa_types
TI LFA Enable
**type**\: :py:class:`FrrtilfaTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Initialize all child containers of the FRR table."""
    self.parent = None
    # Local alias for the (deeply nested) generated class that owns the children.
    table = Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable
    self.frr_exclude_interfaces = table.FrrExcludeInterfaces()
    self.frr_exclude_interfaces.parent = self
    self.frr_remote_lfa_max_metrics = table.FrrRemoteLfaMaxMetrics()
    self.frr_remote_lfa_max_metrics.parent = self
    self.frr_remote_lfa_types = table.FrrRemoteLfaTypes()
    self.frr_remote_lfa_types.parent = self
    self.frr_types = table.FrrTypes()
    self.frr_types.parent = self
    self.frrlfa_candidate_interfaces = table.FrrlfaCandidateInterfaces()
    self.frrlfa_candidate_interfaces.parent = self
    self.frrtilfa_types = table.FrrtilfaTypes()
    self.frrtilfa_types.parent = self
class FrrlfaCandidateInterfaces(object):
    """
    FRR LFA candidate configuration.

    .. attribute:: frrlfa_candidate_interface

        List of interfaces included as LFA candidates in computation,
        keyed by computation type and interface name.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'frrlfa_candidate_interface'
        self.frrlfa_candidate_interface = entries

    class FrrlfaCandidateInterface(object):
        """
        Include an interface to LFA candidate in computation.

        .. attribute:: frr_type <key>

            Computation Type (IsisfrrEnum).

        .. attribute:: interface_name <key>

            Interface name (str, constrained by the device interface pattern).

        .. attribute:: level

            Level (int, range 0..2). Mandatory.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.frr_type = None
            self.interface_name = None
            self.level = None

        @property
        def _common_path(self):
            """XPath of this list entry; needs parent and both keys set."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.frr_type is None:
                raise YPYModelError('Key property frr_type is None')
            if self.interface_name is None:
                raise YPYModelError('Key property interface_name is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:frrlfa-candidate-interface[Cisco-IOS-XR-clns-isis-cfg:frr-type = %s][Cisco-IOS-XR-clns-isis-cfg:interface-name = %s]' % (self.parent._common_path, self.frr_type, self.interface_name)

        def is_config(self):
            """Return True: this node represents configuration data."""
            return True

        def _has_data(self):
            """Return True if any leaf of this entry is set."""
            if not self.is_config():
                return False
            return (self.frr_type is not None
                    or self.interface_name is not None
                    or self.level is not None)

        @staticmethod
        def _meta_info():
            """Return the generated YDK meta information for this class."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:frrlfa-candidate-interfaces' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if any child list entry carries data."""
        if not self.is_config():
            return False
        children = self.frrlfa_candidate_interface
        if children is None:
            return False
        return any(child._has_data() for child in children)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrlfaCandidateInterfaces']['meta_info']
class FrrRemoteLfaMaxMetrics(object):
    """
    Remote LFA maximum metric.

    .. attribute:: frr_remote_lfa_max_metric

        Per-level list configuring the maximum metric for selecting
        a remote LFA node.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'frr_remote_lfa_max_metric'
        self.frr_remote_lfa_max_metric = entries

    class FrrRemoteLfaMaxMetric(object):
        """
        Configure the maximum metric for selecting a remote LFA node.

        .. attribute:: level <key>

            Level to which configuration applies (IsisInternalLevelEnum).

        .. attribute:: max_metric

            Value of the metric (int, range 1..16777215). Mandatory.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.max_metric = None

        @property
        def _common_path(self):
            """XPath of this list entry; needs parent and the level key set."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-max-metric[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            """Return True: this node represents configuration data."""
            return True

        def _has_data(self):
            """Return True if any leaf of this entry is set."""
            if not self.is_config():
                return False
            return self.level is not None or self.max_metric is not None

        @staticmethod
        def _meta_info():
            """Return the generated YDK meta information for this class."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-max-metrics' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if any child list entry carries data."""
        if not self.is_config():
            return False
        children = self.frr_remote_lfa_max_metric
        if children is None:
            return False
        return any(child._has_data() for child in children)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaMaxMetrics']['meta_info']
class FrrTypes(object):
    """
    Type of FRR computation per level.

    .. attribute:: frr_type

        Per-level list of computation types for prefixes reachable
        via the interface.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'frr_type'
        self.frr_type = entries

    class FrrType(object):
        """
        Type of computation for prefixes reachable via interface.

        .. attribute:: level <key>

            Level to which configuration applies (IsisInternalLevelEnum).

        .. attribute:: type

            Computation Type (IsisfrrEnum). Mandatory.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.type = None

        @property
        def _common_path(self):
            """XPath of this list entry; needs parent and the level key set."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-type[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            """Return True: this node represents configuration data."""
            return True

        def _has_data(self):
            """Return True if any leaf of this entry is set."""
            if not self.is_config():
                return False
            return self.level is not None or self.type is not None

        @staticmethod
        def _meta_info():
            """Return the generated YDK meta information for this class."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes.FrrType']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-types' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if any child list entry carries data."""
        if not self.is_config():
            return False
        children = self.frr_type
        if children is None:
            return False
        return any(child._has_data() for child in children)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrTypes']['meta_info']
class FrrRemoteLfaTypes(object):
    """
    Remote LFA Enable.

    .. attribute:: frr_remote_lfa_type

        Per-level list enabling remote LFA.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'frr_remote_lfa_type'
        self.frr_remote_lfa_type = entries

    class FrrRemoteLfaType(object):
        """
        Enable remote LFA for a particular level.

        .. attribute:: level <key>

            Level to which configuration applies (IsisInternalLevelEnum).

        .. attribute:: type

            Remote LFA Type (IsisRemoteLfaEnum). Mandatory.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.type = None

        @property
        def _common_path(self):
            """XPath of this list entry; needs parent and the level key set."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-type[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            """Return True: this node represents configuration data."""
            return True

        def _has_data(self):
            """Return True if any leaf of this entry is set."""
            if not self.is_config():
                return False
            return self.level is not None or self.type is not None

        @staticmethod
        def _meta_info():
            """Return the generated YDK meta information for this class."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-types' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if any child list entry carries data."""
        if not self.is_config():
            return False
        children = self.frr_remote_lfa_type
        if children is None:
            return False
        return any(child._has_data() for child in children)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrRemoteLfaTypes']['meta_info']
class FrrtilfaTypes(object):
    """
    TI LFA Enable.

    .. attribute:: frrtilfa_type

        Per-level list enabling TI LFA.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'frrtilfa_type'
        self.frrtilfa_type = entries

    class FrrtilfaType(object):
        """
        Enable TI LFA for a particular level.

        .. attribute:: level <key>

            Level to which configuration applies (IsisInternalLevelEnum).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None

        @property
        def _common_path(self):
            """XPath of this list entry; needs parent and the level key set."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:frrtilfa-type[Cisco-IOS-XR-clns-isis-cfg:level = %s]' % (self.parent._common_path, self.level)

        def is_config(self):
            """Return True: this node represents configuration data."""
            return True

        def _has_data(self):
            """Return True if the level key is set."""
            if not self.is_config():
                return False
            return self.level is not None

        @staticmethod
        def _meta_info():
            """Return the generated YDK meta information for this class."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:frrtilfa-types' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if any child list entry carries data."""
        if not self.is_config():
            return False
        children = self.frrtilfa_type
        if children is None:
            return False
        return any(child._has_data() for child in children)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrtilfaTypes']['meta_info']
class FrrExcludeInterfaces(object):
    """
    FRR exclusion configuration.

    .. attribute:: frr_exclude_interface

        List of interfaces excluded from computation, keyed by
        computation type and interface name.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'frr_exclude_interface'
        self.frr_exclude_interface = entries

    class FrrExcludeInterface(object):
        """
        Exclude an interface from computation.

        .. attribute:: frr_type <key>

            Computation Type (IsisfrrEnum).

        .. attribute:: interface_name <key>

            Interface name (str, constrained by the device interface pattern).

        .. attribute:: level

            Level (int, range 0..2). Mandatory.
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.frr_type = None
            self.interface_name = None
            self.level = None

        @property
        def _common_path(self):
            """XPath of this list entry; needs parent and both keys set."""
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.frr_type is None:
                raise YPYModelError('Key property frr_type is None')
            if self.interface_name is None:
                raise YPYModelError('Key property interface_name is None')
            return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-exclude-interface[Cisco-IOS-XR-clns-isis-cfg:frr-type = %s][Cisco-IOS-XR-clns-isis-cfg:interface-name = %s]' % (self.parent._common_path, self.frr_type, self.interface_name)

        def is_config(self):
            """Return True: this node represents configuration data."""
            return True

        def _has_data(self):
            """Return True if any leaf of this entry is set."""
            if not self.is_config():
                return False
            return (self.frr_type is not None
                    or self.interface_name is not None
                    or self.level is not None)

        @staticmethod
        def _meta_info():
            """Return the generated YDK meta information for this class."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface']['meta_info']

    @property
    def _common_path(self):
        """XPath of this container relative to its parent."""
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '%s/Cisco-IOS-XR-clns-isis-cfg:frr-exclude-interfaces' % self.parent._common_path

    def is_config(self):
        """Return True: this node represents configuration data."""
        return True

    def _has_data(self):
        """Return True if any child list entry carries data."""
        if not self.is_config():
            return False
        children = self.frr_exclude_interface
        if children is None:
            return False
        return any(child._has_data() for child in children)

    @staticmethod
    def _meta_info():
        """Return the generated YDK meta information for this class."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable.FrrExcludeInterfaces']['meta_info']
@property
def _common_path(self):
    """Absolute model path of the FRR table, anchored under the parent."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '{0}/Cisco-IOS-XR-clns-isis-cfg:interface-frr-table'.format(self.parent._common_path)
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated config model: every node here is configuration, never
    # operational state, so this is unconditionally True.
    return True
def _has_data(self):
    """Return True if any FRR sub-container currently holds data."""
    if not self.is_config():
        return False
    children = (
        self.frr_exclude_interfaces,
        self.frr_remote_lfa_max_metrics,
        self.frr_remote_lfa_types,
        self.frr_types,
        self.frrlfa_candidate_interfaces,
        self.frrtilfa_types,
    )
    # A child counts only when it exists and reports data of its own.
    return any(child is not None and child._has_data() for child in children)
@staticmethod
def _meta_info():
    # Resolve the generated metadata entry for the FRR table container.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceFrrTable']['meta_info']
class MplsLdp(object):
    """
    MPLS LDP configuration.

    .. attribute:: sync_level

        Enable MPLS LDP synchronization for an IS-IS level.
        **type**: int, **range:** 0..2
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.sync_level = None

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:mpls-ldp'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        # The container is non-empty iff its single leaf is set.
        return self.sync_level is not None

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.MplsLdp']['meta_info']
class AutoMetrics(object):
    """
    AutoMetric configuration container.

    .. attribute:: auto_metric

        AutoMetric proactive-protect entries, keyed by level. Legal value
        depends on the topology's metric style: 1..63 for narrow,
        1..16777214 for wide.
        **type**: list of AutoMetric
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        metrics = YList()
        metrics.parent = self
        metrics.name = 'auto_metric'
        self.auto_metric = metrics

    class AutoMetric(object):
        """
        One AutoMetric proactive-protect entry.

        Key: level (IsisInternalLevelEnum).
        Leaf: proactive_protect (1..63 narrow / 1..16777214 wide, mandatory).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.proactive_protect = None

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:auto-metric[Cisco-IOS-XR-clns-isis-cfg:level = {1}]'.format(
                self.parent._common_path, str(self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return self.level is not None or self.proactive_protect is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics.AutoMetric']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:auto-metrics'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        metrics = self.auto_metric
        if metrics is None:
            return False
        return any(m._has_data() for m in metrics)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AutoMetrics']['meta_info']
class AdminTags(object):
    """
    Admin-tag configuration container.

    .. attribute:: admin_tag

        Admin tags for advertised interface connected routes, keyed by level.
        **type**: list of AdminTag
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        tags = YList()
        tags.parent = self
        tags.name = 'admin_tag'
        self.admin_tag = tags

    class AdminTag(object):
        """
        Admin tag for advertised interface connected routes.

        Key: level (IsisInternalLevelEnum).
        Leaf: admin_tag (1..4294967295, mandatory).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.admin_tag = None

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:admin-tag[Cisco-IOS-XR-clns-isis-cfg:level = {1}]'.format(
                self.parent._common_path, str(self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return self.level is not None or self.admin_tag is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags.AdminTag']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:admin-tags'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        tags = self.admin_tag
        if tags is None:
            return False
        return any(tag._has_data() for tag in tags)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.AdminTags']['meta_info']
class InterfaceLinkGroup(object):
    """
    Link group name and level (presence container).

    .. attribute:: level

        Level in which the link group is effective. **type**: int, 0..2

    .. attribute:: link_group

        Link group name. **type**: str, length 0..40, **mandatory**

    .. attribute:: _is_presence

        True: this is a presence container, so its mere existence is data.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self._is_presence = True
        self.level = None
        self.link_group = None

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:interface-link-group'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        # Presence container: existing at all already constitutes data.
        return (self._is_presence
                or self.level is not None
                or self.link_group is not None)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.InterfaceLinkGroup']['meta_info']
class Metrics(object):
    """
    Metric configuration container.

    .. attribute:: metric

        Per-level metric entries. Legal value depends on the topology's
        metric style: 1..63 for narrow, 1..16777215 for wide. All routers
        exclude links with the maximum wide metric (16777215) from SPF.
        **type**: list of Metric
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'metric'
        self.metric = entries

    class Metric(object):
        """
        One per-level metric entry.

        Key: level (IsisInternalLevelEnum).
        Leaf: metric (MetricEnum or int 1..16777215, mandatory).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.metric = None

        class MetricEnum(Enum):
            """
            Allowed metric: 1..63 for narrow, 1..16777215 for wide.

            .. data:: MAXIMUM = 16777215

                Maximum wide metric; all routers exclude this link from SPF.
            """

            MAXIMUM = 16777215

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                # NOTE: enum meta entries are stored directly, without a
                # trailing ['meta_info'] lookup.
                return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics.Metric.MetricEnum']

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:metric[Cisco-IOS-XR-clns-isis-cfg:level = {1}]'.format(
                self.parent._common_path, str(self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return self.level is not None or self.metric is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics.Metric']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:metrics'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        entries = self.metric
        if entries is None:
            return False
        return any(entry._has_data() for entry in entries)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Metrics']['meta_info']
class Weights(object):
    """
    Weight configuration container.

    .. attribute:: weight

        Per-level load-balancing weight entries for the interface.
        **type**: list of Weight
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        entries = YList()
        entries.parent = self
        entries.name = 'weight'
        self.weight = entries

    class Weight(object):
        """
        One per-level load-balancing weight entry.

        Key: level (IsisInternalLevelEnum).
        Leaf: weight (1..16777214, mandatory).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None
            self.weight = None

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:weight[Cisco-IOS-XR-clns-isis-cfg:level = {1}]'.format(
                self.parent._common_path, str(self.level))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return self.level is not None or self.weight is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights.Weight']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:weights'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        entries = self.weight
        if entries is None:
            return False
        return any(entry._has_data() for entry in entries)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData.Weights']['meta_info']
@property
def _common_path(self):
    """Absolute model path of the interface-af-data container."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '{0}/Cisco-IOS-XR-clns-isis-cfg:interface-af-data'.format(self.parent._common_path)
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated config model: always configuration data.
    return True
def _has_data(self):
    """Return True if any leaf or child container of this AF node is set."""
    if not self.is_config():
        return False
    # Plain leaves: set means data.
    if self.interface_af_state is not None or self.running is not None:
        return True
    # Child containers: must exist and carry data themselves.
    containers = (
        self.admin_tags,
        self.auto_metrics,
        self.interface_frr_table,
        self.interface_link_group,
        self.metrics,
        self.mpls_ldp,
        self.prefix_sid,
        self.weights,
    )
    return any(c is not None and c._has_data() for c in containers)
@staticmethod
def _meta_info():
    # Resolve the generated metadata entry for the interface-af-data node.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.InterfaceAfData']['meta_info']
class TopologyName(object):
"""
keys\: topology\-name
.. attribute:: topology_name <key>
Topology Name
**type**\: str
**range:** 0..32
.. attribute:: admin_tags
admin\-tag configuration
**type**\: :py:class:`AdminTags <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags>`
.. attribute:: auto_metrics
AutoMetric configuration
**type**\: :py:class:`AutoMetrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics>`
.. attribute:: interface_af_state
Interface state
**type**\: :py:class:`IsisInterfaceAfStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisInterfaceAfStateEnum>`
.. attribute:: interface_frr_table
Fast\-ReRoute configuration
**type**\: :py:class:`InterfaceFrrTable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable>`
.. attribute:: interface_link_group
Provide link group name and level
**type**\: :py:class:`InterfaceLinkGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup>`
.. attribute:: metrics
Metric configuration
**type**\: :py:class:`Metrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics>`
.. attribute:: mpls_ldp
MPLS LDP configuration
**type**\: :py:class:`MplsLdp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp>`
.. attribute:: prefix_sid
Assign prefix SID to an interface, ISISPHPFlag will be rejected if set to disable, ISISEXPLICITNULLFlag will override the value of ISISPHPFlag
**type**\: :py:class:`PrefixSid <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.PrefixSid>`
.. attribute:: running
The presence of this object allows an address\-family to be run over the interface in question.This must be the first object created under the InterfaceAddressFamily container, and the last one deleted
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: weights
Weight configuration
**type**\: :py:class:`Weights <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Build the TopologyName node with eagerly created child containers."""
    self.parent = None
    self.topology_name = None
    # Alias the (long) generated class path once; each container is wired
    # back to this node via its parent pointer.
    topo = Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName
    self.admin_tags = topo.AdminTags()
    self.admin_tags.parent = self
    self.auto_metrics = topo.AutoMetrics()
    self.auto_metrics.parent = self
    self.interface_af_state = None
    self.interface_frr_table = topo.InterfaceFrrTable()
    self.interface_frr_table.parent = self
    self.interface_link_group = None
    self.metrics = topo.Metrics()
    self.metrics.parent = self
    self.mpls_ldp = topo.MplsLdp()
    self.mpls_ldp.parent = self
    self.prefix_sid = None
    self.running = None
    self.weights = topo.Weights()
    self.weights.parent = self
class PrefixSid(object):
    """
    Prefix SID assignment for an interface (presence container).

    ISISPHPFlag is rejected if set to disable; ISISEXPLICITNULLFlag
    overrides the value of ISISPHPFlag.

    Mandatory leaves: explicit_null, nflag_clear, php, type,
    value (0..1048575). _is_presence marks this as a presence container.
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self._is_presence = True
        self.explicit_null = None
        self.nflag_clear = None
        self.php = None
        self.type = None
        self.value = None

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:prefix-sid'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        # Presence container: existing at all already constitutes data.
        return (self._is_presence
                or self.explicit_null is not None
                or self.nflag_clear is not None
                or self.php is not None
                or self.type is not None
                or self.value is not None)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.PrefixSid']['meta_info']
class InterfaceFrrTable(object):
"""
Fast\-ReRoute configuration
.. attribute:: frr_exclude_interfaces
FRR exclusion configuration
**type**\: :py:class:`FrrExcludeInterfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces>`
.. attribute:: frr_remote_lfa_max_metrics
Remote LFA maxmimum metric
**type**\: :py:class:`FrrRemoteLfaMaxMetrics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics>`
.. attribute:: frr_remote_lfa_types
Remote LFA Enable
**type**\: :py:class:`FrrRemoteLfaTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes>`
.. attribute:: frr_types
Type of FRR computation per level
**type**\: :py:class:`FrrTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes>`
.. attribute:: frrlfa_candidate_interfaces
FRR LFA candidate configuration
**type**\: :py:class:`FrrlfaCandidateInterfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces>`
.. attribute:: frrtilfa_types
TI LFA Enable
**type**\: :py:class:`FrrtilfaTypes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes>`
"""
_prefix = 'clns-isis-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Create all FRR sub-containers and parent them to this table."""
    self.parent = None
    # Alias the generated class path once to keep the assignments readable.
    tbl = Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable
    self.frr_exclude_interfaces = tbl.FrrExcludeInterfaces()
    self.frr_exclude_interfaces.parent = self
    self.frr_remote_lfa_max_metrics = tbl.FrrRemoteLfaMaxMetrics()
    self.frr_remote_lfa_max_metrics.parent = self
    self.frr_remote_lfa_types = tbl.FrrRemoteLfaTypes()
    self.frr_remote_lfa_types.parent = self
    self.frr_types = tbl.FrrTypes()
    self.frr_types.parent = self
    self.frrlfa_candidate_interfaces = tbl.FrrlfaCandidateInterfaces()
    self.frrlfa_candidate_interfaces.parent = self
    self.frrtilfa_types = tbl.FrrtilfaTypes()
    self.frrtilfa_types.parent = self
class FrrlfaCandidateInterfaces(object):
    """
    FRR LFA candidate configuration container.

    .. attribute:: frrlfa_candidate_interface

        Interfaces included as LFA candidates in computation.
        **type**: list of FrrlfaCandidateInterface
    """

    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        candidates = YList()
        candidates.parent = self
        candidates.name = 'frrlfa_candidate_interface'
        self.frrlfa_candidate_interface = candidates

    class FrrlfaCandidateInterface(object):
        """
        Include one interface as an LFA candidate in computation.

        Keys: frr_type (computation type), interface_name.
        Leaf: level (0..2, mandatory).
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.frr_type = None
            self.interface_name = None
            self.level = None

        @property
        def _common_path(self):
            # Both list keys must be populated before a path can be derived.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.frr_type is None:
                raise YPYModelError('Key property frr_type is None')
            if self.interface_name is None:
                raise YPYModelError('Key property interface_name is None')
            return '{0}/Cisco-IOS-XR-clns-isis-cfg:frrlfa-candidate-interface[Cisco-IOS-XR-clns-isis-cfg:frr-type = {1}][Cisco-IOS-XR-clns-isis-cfg:interface-name = {2}]'.format(
                self.parent._common_path, str(self.frr_type), str(self.interface_name))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            return (self.frr_type is not None
                    or self.interface_name is not None
                    or self.level is not None)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces.FrrlfaCandidateInterface']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return '{0}/Cisco-IOS-XR-clns-isis-cfg:frrlfa-candidate-interfaces'.format(self.parent._common_path)

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        candidates = self.frrlfa_candidate_interface
        if candidates is None:
            return False
        return any(c._has_data() for c in candidates)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrlfaCandidateInterfaces']['meta_info']
                                    class FrrRemoteLfaMaxMetrics(object):
                                        """
                                        Remote LFA maximum metric.

                                        .. attribute:: frr_remote_lfa_max_metric
                                        	Configure the maximum metric for selecting a remote LFA node
                                        	**type**\: list of :py:class:`FrrRemoteLfaMaxMetric <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric>`
                                        """
                                        # YANG module prefix / revision this class was generated from.
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            # YList keeps the child entries bound back to this container.
                                            self.frr_remote_lfa_max_metric = YList()
                                            self.frr_remote_lfa_max_metric.parent = self
                                            self.frr_remote_lfa_max_metric.name = 'frr_remote_lfa_max_metric'
                                        class FrrRemoteLfaMaxMetric(object):
                                            """
                                            Configure the maximum metric for selecting a
                                            remote LFA node.

                                            .. attribute:: level <key>
                                            	Level to which configuration applies
                                            	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
                                            .. attribute:: max_metric
                                            	Value of the metric
                                            	**type**\: int
                                            	**range:** 1..16777215
                                            	**mandatory**\: True
                                            """
                                            _prefix = 'clns-isis-cfg'
                                            _revision = '2015-11-09'
                                            def __init__(self):
                                                self.parent = None
                                                self.level = None
                                                self.max_metric = None
                                            # XPath of this keyed list entry; parent and the 'level'
                                            # key must both be set before the path can be built.
                                            @property
                                            def _common_path(self):
                                                if self.parent is None:
                                                    raise YPYModelError('parent is not set . Cannot derive path.')
                                                if self.level is None:
                                                    raise YPYModelError('Key property level is None')
                                                return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-max-metric[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
                                            def is_config(self):
                                                ''' Returns True if this instance represents config data else returns False '''
                                                return True
                                            # True when any leaf (key included) is populated.
                                            def _has_data(self):
                                                if not self.is_config():
                                                    return False
                                                if self.level is not None:
                                                    return True
                                                if self.max_metric is not None:
                                                    return True
                                                return False
                                            # Imported lazily to avoid a circular import with the meta module.
                                            @staticmethod
                                            def _meta_info():
                                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                                return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics.FrrRemoteLfaMaxMetric']['meta_info']
                                        # XPath of this container, derived from the parent's path.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-max-metrics'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # The container has data when any child list entry does.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.frr_remote_lfa_max_metric is not None:
                                                for child_ref in self.frr_remote_lfa_max_metric:
                                                    if child_ref._has_data():
                                                        return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaMaxMetrics']['meta_info']
                                    class FrrTypes(object):
                                        """
                                        Type of FRR computation per level.

                                        .. attribute:: frr_type
                                        	Type of computation for prefixes reachable via interface
                                        	**type**\: list of :py:class:`FrrType <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes.FrrType>`
                                        """
                                        # YANG module prefix / revision this class was generated from.
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            # YList keeps the child entries bound back to this container.
                                            self.frr_type = YList()
                                            self.frr_type.parent = self
                                            self.frr_type.name = 'frr_type'
                                        class FrrType(object):
                                            """
                                            Type of computation for prefixes reachable
                                            via interface.

                                            .. attribute:: level <key>
                                            	Level to which configuration applies
                                            	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
                                            .. attribute:: type
                                            	Computation Type
                                            	**type**\: :py:class:`IsisfrrEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisfrrEnum>`
                                            	**mandatory**\: True
                                            """
                                            _prefix = 'clns-isis-cfg'
                                            _revision = '2015-11-09'
                                            def __init__(self):
                                                self.parent = None
                                                self.level = None
                                                # NOTE: shadows the 'type' builtin by generator design.
                                                self.type = None
                                            # XPath of this keyed list entry; parent and the 'level'
                                            # key must both be set before the path can be built.
                                            @property
                                            def _common_path(self):
                                                if self.parent is None:
                                                    raise YPYModelError('parent is not set . Cannot derive path.')
                                                if self.level is None:
                                                    raise YPYModelError('Key property level is None')
                                                return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-type[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
                                            def is_config(self):
                                                ''' Returns True if this instance represents config data else returns False '''
                                                return True
                                            # True when any leaf (key included) is populated.
                                            def _has_data(self):
                                                if not self.is_config():
                                                    return False
                                                if self.level is not None:
                                                    return True
                                                if self.type is not None:
                                                    return True
                                                return False
                                            # Imported lazily to avoid a circular import with the meta module.
                                            @staticmethod
                                            def _meta_info():
                                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                                return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes.FrrType']['meta_info']
                                        # XPath of this container, derived from the parent's path.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-types'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # The container has data when any child list entry does.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.frr_type is not None:
                                                for child_ref in self.frr_type:
                                                    if child_ref._has_data():
                                                        return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrTypes']['meta_info']
                                    class FrrRemoteLfaTypes(object):
                                        """
                                        Remote LFA Enable.

                                        .. attribute:: frr_remote_lfa_type
                                        	Enable remote lfa for a particular level
                                        	**type**\: list of :py:class:`FrrRemoteLfaType <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType>`
                                        """
                                        # YANG module prefix / revision this class was generated from.
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            # YList keeps the child entries bound back to this container.
                                            self.frr_remote_lfa_type = YList()
                                            self.frr_remote_lfa_type.parent = self
                                            self.frr_remote_lfa_type.name = 'frr_remote_lfa_type'
                                        class FrrRemoteLfaType(object):
                                            """
                                            Enable remote lfa for a particular level.

                                            .. attribute:: level <key>
                                            	Level to which configuration applies
                                            	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
                                            .. attribute:: type
                                            	Remote LFA Type
                                            	**type**\: :py:class:`IsisRemoteLfaEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisRemoteLfaEnum>`
                                            	**mandatory**\: True
                                            """
                                            _prefix = 'clns-isis-cfg'
                                            _revision = '2015-11-09'
                                            def __init__(self):
                                                self.parent = None
                                                self.level = None
                                                # NOTE: shadows the 'type' builtin by generator design.
                                                self.type = None
                                            # XPath of this keyed list entry; parent and the 'level'
                                            # key must both be set before the path can be built.
                                            @property
                                            def _common_path(self):
                                                if self.parent is None:
                                                    raise YPYModelError('parent is not set . Cannot derive path.')
                                                if self.level is None:
                                                    raise YPYModelError('Key property level is None')
                                                return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-type[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
                                            def is_config(self):
                                                ''' Returns True if this instance represents config data else returns False '''
                                                return True
                                            # True when any leaf (key included) is populated.
                                            def _has_data(self):
                                                if not self.is_config():
                                                    return False
                                                if self.level is not None:
                                                    return True
                                                if self.type is not None:
                                                    return True
                                                return False
                                            # Imported lazily to avoid a circular import with the meta module.
                                            @staticmethod
                                            def _meta_info():
                                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                                return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes.FrrRemoteLfaType']['meta_info']
                                        # XPath of this container, derived from the parent's path.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-remote-lfa-types'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # The container has data when any child list entry does.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.frr_remote_lfa_type is not None:
                                                for child_ref in self.frr_remote_lfa_type:
                                                    if child_ref._has_data():
                                                        return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrRemoteLfaTypes']['meta_info']
                                    class FrrtilfaTypes(object):
                                        """
                                        TI LFA Enable.

                                        .. attribute:: frrtilfa_type
                                        	Enable TI lfa for a particular level
                                        	**type**\: list of :py:class:`FrrtilfaType <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType>`
                                        """
                                        # YANG module prefix / revision this class was generated from.
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            # YList keeps the child entries bound back to this container.
                                            self.frrtilfa_type = YList()
                                            self.frrtilfa_type.parent = self
                                            self.frrtilfa_type.name = 'frrtilfa_type'
                                        class FrrtilfaType(object):
                                            """
                                            Enable TI lfa for a particular level.

                                            .. attribute:: level <key>
                                            	Level to which configuration applies
                                            	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
                                            """
                                            _prefix = 'clns-isis-cfg'
                                            _revision = '2015-11-09'
                                            def __init__(self):
                                                self.parent = None
                                                # 'level' is the only leaf: presence of the list entry
                                                # itself is what enables TI-LFA for that level.
                                                self.level = None
                                            # XPath of this keyed list entry; parent and the 'level'
                                            # key must both be set before the path can be built.
                                            @property
                                            def _common_path(self):
                                                if self.parent is None:
                                                    raise YPYModelError('parent is not set . Cannot derive path.')
                                                if self.level is None:
                                                    raise YPYModelError('Key property level is None')
                                                return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frrtilfa-type[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
                                            def is_config(self):
                                                ''' Returns True if this instance represents config data else returns False '''
                                                return True
                                            # True when the key leaf is populated.
                                            def _has_data(self):
                                                if not self.is_config():
                                                    return False
                                                if self.level is not None:
                                                    return True
                                                return False
                                            # Imported lazily to avoid a circular import with the meta module.
                                            @staticmethod
                                            def _meta_info():
                                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                                return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes.FrrtilfaType']['meta_info']
                                        # XPath of this container, derived from the parent's path.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frrtilfa-types'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # The container has data when any child list entry does.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.frrtilfa_type is not None:
                                                for child_ref in self.frrtilfa_type:
                                                    if child_ref._has_data():
                                                        return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrtilfaTypes']['meta_info']
                                    class FrrExcludeInterfaces(object):
                                        """
                                        FRR exclusion configuration.

                                        .. attribute:: frr_exclude_interface
                                        	Exclude an interface from computation
                                        	**type**\: list of :py:class:`FrrExcludeInterface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface>`
                                        """
                                        # YANG module prefix / revision this class was generated from.
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            # YList keeps the child entries bound back to this container.
                                            self.frr_exclude_interface = YList()
                                            self.frr_exclude_interface.parent = self
                                            self.frr_exclude_interface.name = 'frr_exclude_interface'
                                        class FrrExcludeInterface(object):
                                            """
                                            Exclude an interface from computation.

                                            .. attribute:: frr_type <key>
                                            	Computation Type
                                            	**type**\: :py:class:`IsisfrrEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.IsisfrrEnum>`
                                            .. attribute:: interface_name <key>
                                            	Interface
                                            	**type**\: str
                                            	**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
                                            .. attribute:: level
                                            	Level
                                            	**type**\: int
                                            	**range:** 0..2
                                            	**mandatory**\: True
                                            """
                                            _prefix = 'clns-isis-cfg'
                                            _revision = '2015-11-09'
                                            def __init__(self):
                                                self.parent = None
                                                self.frr_type = None
                                                self.interface_name = None
                                                self.level = None
                                            # XPath of this list entry; this list is keyed by BOTH
                                            # frr_type and interface_name, so both must be set.
                                            @property
                                            def _common_path(self):
                                                if self.parent is None:
                                                    raise YPYModelError('parent is not set . Cannot derive path.')
                                                if self.frr_type is None:
                                                    raise YPYModelError('Key property frr_type is None')
                                                if self.interface_name is None:
                                                    raise YPYModelError('Key property interface_name is None')
                                                return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-exclude-interface[Cisco-IOS-XR-clns-isis-cfg:frr-type = ' + str(self.frr_type) + '][Cisco-IOS-XR-clns-isis-cfg:interface-name = ' + str(self.interface_name) + ']'
                                            def is_config(self):
                                                ''' Returns True if this instance represents config data else returns False '''
                                                return True
                                            # True when any leaf (keys included) is populated.
                                            def _has_data(self):
                                                if not self.is_config():
                                                    return False
                                                if self.frr_type is not None:
                                                    return True
                                                if self.interface_name is not None:
                                                    return True
                                                if self.level is not None:
                                                    return True
                                                return False
                                            # Imported lazily to avoid a circular import with the meta module.
                                            @staticmethod
                                            def _meta_info():
                                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                                return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces.FrrExcludeInterface']['meta_info']
                                        # XPath of this container, derived from the parent's path.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:frr-exclude-interfaces'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # The container has data when any child list entry does.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.frr_exclude_interface is not None:
                                                for child_ref in self.frr_exclude_interface:
                                                    if child_ref._has_data():
                                                        return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable.FrrExcludeInterfaces']['meta_info']
                                    # XPath of the interface-frr-table container, derived from the
                                    # parent's path; the parent link must be wired first.
                                    @property
                                    def _common_path(self):
                                        if self.parent is None:
                                            raise YPYModelError('parent is not set . Cannot derive path.')
                                        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:interface-frr-table'
                                    def is_config(self):
                                        ''' Returns True if this instance represents config data else returns False '''
                                        return True
                                    # True when any of the FRR sub-containers carries data.
                                    def _has_data(self):
                                        if not self.is_config():
                                            return False
                                        if self.frr_exclude_interfaces is not None and self.frr_exclude_interfaces._has_data():
                                            return True
                                        if self.frr_remote_lfa_max_metrics is not None and self.frr_remote_lfa_max_metrics._has_data():
                                            return True
                                        if self.frr_remote_lfa_types is not None and self.frr_remote_lfa_types._has_data():
                                            return True
                                        if self.frr_types is not None and self.frr_types._has_data():
                                            return True
                                        if self.frrlfa_candidate_interfaces is not None and self.frrlfa_candidate_interfaces._has_data():
                                            return True
                                        if self.frrtilfa_types is not None and self.frrtilfa_types._has_data():
                                            return True
                                        return False
                                    # Imported lazily to avoid a circular import with the meta module.
                                    @staticmethod
                                    def _meta_info():
                                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceFrrTable']['meta_info']
                                class MplsLdp(object):
                                    """
                                    MPLS LDP configuration.

                                    .. attribute:: sync_level
                                    	Enable MPLS LDP Synchronization for an IS\-IS level
                                    	**type**\: int
                                    	**range:** 0..2
                                    """
                                    # YANG module prefix / revision this class was generated from.
                                    _prefix = 'clns-isis-cfg'
                                    _revision = '2015-11-09'
                                    def __init__(self):
                                        self.parent = None
                                        self.sync_level = None
                                    # XPath of this container, derived from the parent's path.
                                    @property
                                    def _common_path(self):
                                        if self.parent is None:
                                            raise YPYModelError('parent is not set . Cannot derive path.')
                                        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:mpls-ldp'
                                    def is_config(self):
                                        ''' Returns True if this instance represents config data else returns False '''
                                        return True
                                    # True when the single leaf is populated.
                                    def _has_data(self):
                                        if not self.is_config():
                                            return False
                                        if self.sync_level is not None:
                                            return True
                                        return False
                                    # Imported lazily to avoid a circular import with the meta module.
                                    @staticmethod
                                    def _meta_info():
                                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.MplsLdp']['meta_info']
                                class AutoMetrics(object):
                                    """
                                    AutoMetric configuration.

                                    .. attribute:: auto_metric
                                    	AutoMetric Proactive\-Protect configuration. Legal value depends on the metric\-style specified for the topology. If the metric\-style defined is narrow, then only a value between <1\-63> is allowed and if the metric\-style is defined as wide, then a value between <1\-16777214> is allowed as the auto\-metric value
                                    	**type**\: list of :py:class:`AutoMetric <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric>`
                                    """
                                    # YANG module prefix / revision this class was generated from.
                                    _prefix = 'clns-isis-cfg'
                                    _revision = '2015-11-09'
                                    def __init__(self):
                                        self.parent = None
                                        # YList keeps the child entries bound back to this container.
                                        self.auto_metric = YList()
                                        self.auto_metric.parent = self
                                        self.auto_metric.name = 'auto_metric'
                                    class AutoMetric(object):
                                        """
                                        AutoMetric Proactive\-Protect configuration.

                                        The legal value depends on the metric\-style specified
                                        for the topology\: narrow allows <1\-63>, wide allows
                                        <1\-16777214>.

                                        .. attribute:: level <key>
                                        	Level to which configuration applies
                                        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
                                        .. attribute:: proactive_protect
                                        	Allowed auto metric\:<1\-63> for narrow ,<1\-16777214> for wide
                                        	**type**\: int
                                        	**range:** 1..16777214
                                        	**mandatory**\: True
                                        """
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            self.level = None
                                            self.proactive_protect = None
                                        # XPath of this keyed list entry; parent and the 'level'
                                        # key must both be set before the path can be built.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            if self.level is None:
                                                raise YPYModelError('Key property level is None')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:auto-metric[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # True when any leaf (key included) is populated.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.level is not None:
                                                return True
                                            if self.proactive_protect is not None:
                                                return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics.AutoMetric']['meta_info']
                                    # XPath of this container, derived from the parent's path.
                                    @property
                                    def _common_path(self):
                                        if self.parent is None:
                                            raise YPYModelError('parent is not set . Cannot derive path.')
                                        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:auto-metrics'
                                    def is_config(self):
                                        ''' Returns True if this instance represents config data else returns False '''
                                        return True
                                    # The container has data when any child list entry does.
                                    def _has_data(self):
                                        if not self.is_config():
                                            return False
                                        if self.auto_metric is not None:
                                            for child_ref in self.auto_metric:
                                                if child_ref._has_data():
                                                    return True
                                        return False
                                    # Imported lazily to avoid a circular import with the meta module.
                                    @staticmethod
                                    def _meta_info():
                                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AutoMetrics']['meta_info']
                                class AdminTags(object):
                                    """
                                    admin\-tag configuration.

                                    .. attribute:: admin_tag
                                    	Admin tag for advertised interface connected routes
                                    	**type**\: list of :py:class:`AdminTag <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag>`
                                    """
                                    # YANG module prefix / revision this class was generated from.
                                    _prefix = 'clns-isis-cfg'
                                    _revision = '2015-11-09'
                                    def __init__(self):
                                        self.parent = None
                                        # YList keeps the child entries bound back to this container.
                                        self.admin_tag = YList()
                                        self.admin_tag.parent = self
                                        self.admin_tag.name = 'admin_tag'
                                    class AdminTag(object):
                                        """
                                        Admin tag for advertised interface connected routes.

                                        .. attribute:: level <key>
                                        	Level to which configuration applies
                                        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
                                        .. attribute:: admin_tag
                                        	Tag to associate with connected routes
                                        	**type**\: int
                                        	**range:** 1..4294967295
                                        	**mandatory**\: True
                                        """
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            self.level = None
                                            self.admin_tag = None
                                        # XPath of this keyed list entry; parent and the 'level'
                                        # key must both be set before the path can be built.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            if self.level is None:
                                                raise YPYModelError('Key property level is None')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:admin-tag[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # True when any leaf (key included) is populated.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.level is not None:
                                                return True
                                            if self.admin_tag is not None:
                                                return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags.AdminTag']['meta_info']
                                    # XPath of this container, derived from the parent's path.
                                    @property
                                    def _common_path(self):
                                        if self.parent is None:
                                            raise YPYModelError('parent is not set . Cannot derive path.')
                                        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:admin-tags'
                                    def is_config(self):
                                        ''' Returns True if this instance represents config data else returns False '''
                                        return True
                                    # The container has data when any child list entry does.
                                    def _has_data(self):
                                        if not self.is_config():
                                            return False
                                        if self.admin_tag is not None:
                                            for child_ref in self.admin_tag:
                                                if child_ref._has_data():
                                                    return True
                                        return False
                                    # Imported lazily to avoid a circular import with the meta module.
                                    @staticmethod
                                    def _meta_info():
                                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.AdminTags']['meta_info']
                                class InterfaceLinkGroup(object):
                                    """
                                    Provide link group name and level.

                                    .. attribute:: level
                                    	Level in which link group will be effective
                                    	**type**\: int
                                    	**range:** 0..2
                                    .. attribute:: link_group
                                    	Link Group
                                    	**type**\: str
                                    	**range:** 0..40
                                    	**mandatory**\: True
                                    .. attribute:: _is_presence
                                    	Is present if this instance represents presence container else not
                                    	**type**\: bool

                                    This class is a :ref:`presence class<presence-class>`
                                    """
                                    # YANG module prefix / revision this class was generated from.
                                    _prefix = 'clns-isis-cfg'
                                    _revision = '2015-11-09'
                                    def __init__(self):
                                        self.parent = None
                                        # Presence container: its mere existence carries meaning,
                                        # so _has_data() is True even with no leafs set.
                                        self._is_presence = True
                                        self.level = None
                                        self.link_group = None
                                    # XPath of this container, derived from the parent's path.
                                    @property
                                    def _common_path(self):
                                        if self.parent is None:
                                            raise YPYModelError('parent is not set . Cannot derive path.')
                                        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:interface-link-group'
                                    def is_config(self):
                                        ''' Returns True if this instance represents config data else returns False '''
                                        return True
                                    # Presence containers always report data once instantiated.
                                    def _has_data(self):
                                        if not self.is_config():
                                            return False
                                        if self._is_presence:
                                            return True
                                        if self.level is not None:
                                            return True
                                        if self.link_group is not None:
                                            return True
                                        return False
                                    # Imported lazily to avoid a circular import with the meta module.
                                    @staticmethod
                                    def _meta_info():
                                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.InterfaceLinkGroup']['meta_info']
                                class Metrics(object):
                                    """
                                    Metric configuration.

                                    .. attribute:: metric
                                    	Metric configuration. Legal value depends on the metric\-style specified for the topology. If the metric\-style defined is narrow, then only a value between <1\-63> is allowed and if the metric\-style is defined as wide, then a value between <1\-16777215> is allowed as the metric value. All routers exclude links with the maximum wide metric (16777215) from their SPF
                                    	**type**\: list of :py:class:`Metric <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric>`
                                    """
                                    # YANG module prefix / revision this class was generated from.
                                    _prefix = 'clns-isis-cfg'
                                    _revision = '2015-11-09'
                                    def __init__(self):
                                        self.parent = None
                                        # YList keeps the child entries bound back to this container.
                                        self.metric = YList()
                                        self.metric.parent = self
                                        self.metric.name = 'metric'
                                    class Metric(object):
                                        """
                                        Metric configuration.

                                        The legal value depends on the metric\-style specified
                                        for the topology\: narrow allows <1\-63>, wide allows
                                        <1\-16777215>. All routers exclude links with the
                                        maximum wide metric (16777215) from their SPF.

                                        .. attribute:: level <key>
                                        	Level to which configuration applies
                                        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
                                        .. attribute:: metric
                                        	Allowed metric\: <1\-63> for narrow, <1\-16777215> for wide
                                        	**type**\: one of the below types:
                                        	**type**\: :py:class:`MetricEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum>`
                                        	**mandatory**\: True
                                        ----
                                        	**type**\: int
                                        	**range:** 1..16777215
                                        	**mandatory**\: True
                                        ----
                                        """
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            self.level = None
                                            # Union leaf: either a plain int or MetricEnum.MAXIMUM.
                                            self.metric = None
                                        class MetricEnum(Enum):
                                            """
                                            MetricEnum

                                            Allowed metric\: <1\-63> for narrow,
                                            <1\-16777215> for wide.

                                            .. data:: MAXIMUM = 16777215
                                            	Maximum wide metric. All routers will
                                            	exclude this link from their SPF
                                            """
                                            MAXIMUM = 16777215
                                            # Imported lazily to avoid a circular import with the meta module.
                                            @staticmethod
                                            def _meta_info():
                                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                                return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric.MetricEnum']
                                        # XPath of this keyed list entry; parent and the 'level'
                                        # key must both be set before the path can be built.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            if self.level is None:
                                                raise YPYModelError('Key property level is None')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:metric[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # True when any leaf (key included) is populated.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.level is not None:
                                                return True
                                            if self.metric is not None:
                                                return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics.Metric']['meta_info']
                                    # XPath of this container, derived from the parent's path.
                                    @property
                                    def _common_path(self):
                                        if self.parent is None:
                                            raise YPYModelError('parent is not set . Cannot derive path.')
                                        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:metrics'
                                    def is_config(self):
                                        ''' Returns True if this instance represents config data else returns False '''
                                        return True
                                    # The container has data when any child list entry does.
                                    def _has_data(self):
                                        if not self.is_config():
                                            return False
                                        if self.metric is not None:
                                            for child_ref in self.metric:
                                                if child_ref._has_data():
                                                    return True
                                        return False
                                    # Imported lazily to avoid a circular import with the meta module.
                                    @staticmethod
                                    def _meta_info():
                                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Metrics']['meta_info']
                                class Weights(object):
                                    """
                                    Weight configuration.

                                    .. attribute:: weight
                                    	Weight configuration under interface for load balancing
                                    	**type**\: list of :py:class:`Weight <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight>`
                                    """
                                    # YANG module prefix / revision this class was generated from.
                                    _prefix = 'clns-isis-cfg'
                                    _revision = '2015-11-09'
                                    def __init__(self):
                                        self.parent = None
                                        # YList keeps the child entries bound back to this container.
                                        self.weight = YList()
                                        self.weight.parent = self
                                        self.weight.name = 'weight'
                                    class Weight(object):
                                        """
                                        Weight configuration under interface for load
                                        balancing.

                                        .. attribute:: level <key>
                                        	Level to which configuration applies
                                        	**type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`
                                        .. attribute:: weight
                                        	Weight to be configured under interface for Load Balancing. Allowed weight\: <1\-16777215>
                                        	**type**\: int
                                        	**range:** 1..16777214
                                        	**mandatory**\: True
                                        """
                                        _prefix = 'clns-isis-cfg'
                                        _revision = '2015-11-09'
                                        def __init__(self):
                                            self.parent = None
                                            self.level = None
                                            self.weight = None
                                        # XPath of this keyed list entry; parent and the 'level'
                                        # key must both be set before the path can be built.
                                        @property
                                        def _common_path(self):
                                            if self.parent is None:
                                                raise YPYModelError('parent is not set . Cannot derive path.')
                                            if self.level is None:
                                                raise YPYModelError('Key property level is None')
                                            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:weight[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'
                                        def is_config(self):
                                            ''' Returns True if this instance represents config data else returns False '''
                                            return True
                                        # True when any leaf (key included) is populated.
                                        def _has_data(self):
                                            if not self.is_config():
                                                return False
                                            if self.level is not None:
                                                return True
                                            if self.weight is not None:
                                                return True
                                            return False
                                        # Imported lazily to avoid a circular import with the meta module.
                                        @staticmethod
                                        def _meta_info():
                                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights.Weight']['meta_info']
                                    # XPath of this container, derived from the parent's path.
                                    @property
                                    def _common_path(self):
                                        if self.parent is None:
                                            raise YPYModelError('parent is not set . Cannot derive path.')
                                        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:weights'
                                    def is_config(self):
                                        ''' Returns True if this instance represents config data else returns False '''
                                        return True
                                    # The container has data when any child list entry does.
                                    def _has_data(self):
                                        if not self.is_config():
                                            return False
                                        if self.weight is not None:
                                            for child_ref in self.weight:
                                                if child_ref._has_data():
                                                    return True
                                        return False
                                    # Imported lazily to avoid a circular import with the meta module.
                                    @staticmethod
                                    def _meta_info():
                                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName.Weights']['meta_info']
                                # XPath of this keyed topology entry; the list key is itself
                                # named 'topology-name', so parent and key must both be set.
                                @property
                                def _common_path(self):
                                    if self.parent is None:
                                        raise YPYModelError('parent is not set . Cannot derive path.')
                                    if self.topology_name is None:
                                        raise YPYModelError('Key property topology_name is None')
                                    return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:topology-name[Cisco-IOS-XR-clns-isis-cfg:topology-name = ' + str(self.topology_name) + ']'
                                def is_config(self):
                                    ''' Returns True if this instance represents config data else returns False '''
                                    return True
                                # True when the key, any plain leaf, or any child container
                                # carries data.
                                def _has_data(self):
                                    if not self.is_config():
                                        return False
                                    if self.topology_name is not None:
                                        return True
                                    if self.admin_tags is not None and self.admin_tags._has_data():
                                        return True
                                    if self.auto_metrics is not None and self.auto_metrics._has_data():
                                        return True
                                    if self.interface_af_state is not None:
                                        return True
                                    if self.interface_frr_table is not None and self.interface_frr_table._has_data():
                                        return True
                                    if self.interface_link_group is not None and self.interface_link_group._has_data():
                                        return True
                                    if self.metrics is not None and self.metrics._has_data():
                                        return True
                                    if self.mpls_ldp is not None and self.mpls_ldp._has_data():
                                        return True
                                    if self.prefix_sid is not None and self.prefix_sid._has_data():
                                        return True
                                    if self.running is not None:
                                        return True
                                    if self.weights is not None and self.weights._has_data():
                                        return True
                                    return False
                                # Imported lazily to avoid a circular import with the meta module.
                                @staticmethod
                                def _meta_info():
                                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
                                    return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf.TopologyName']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.af_name is None:
raise YPYModelError('Key property af_name is None')
if self.saf_name is None:
raise YPYModelError('Key property saf_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:interface-af[Cisco-IOS-XR-clns-isis-cfg:af-name = ' + str(self.af_name) + '][Cisco-IOS-XR-clns-isis-cfg:saf-name = ' + str(self.saf_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.af_name is not None:
return True
if self.saf_name is not None:
return True
if self.interface_af_data is not None and self.interface_af_data._has_data():
return True
if self.topology_name is not None:
for child_ref in self.topology_name:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Lazy import avoids a circular dependency with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs.InterfaceAf']['meta_info']
@property
def _common_path(self):
    """XPath of the interface-afs container under its parent interface."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-clns-isis-cfg:interface-afs' % self.parent._common_path
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated configuration model: always represents config data.
    return True
def _has_data(self):
    """True when any interface-af list entry holds data."""
    if not self.is_config():
        return False
    entries = self.interface_af if self.interface_af is not None else ()
    return any(entry._has_data() for entry in entries)
@staticmethod
def _meta_info():
    # Lazy import avoids a circular dependency with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.InterfaceAfs']['meta_info']
class CsnpIntervals(object):
    """
    CSNP\-interval configuration.

    .. attribute:: csnp_interval

        CSNP\-interval configuration. No fixed default value as this depends
        on the media type of the interface
        **type**\: list of :py:class:`CsnpInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals.CsnpInterval>`
    """

    # YANG module identity used for path construction.
    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of per-level CSNP interval entries (YDK list wiring).
        self.csnp_interval = YList()
        self.csnp_interval.parent = self
        self.csnp_interval.name = 'csnp_interval'


    class CsnpInterval(object):
        """
        CSNP\-interval configuration. No fixed default value as this depends
        on the media type of the interface.

        .. attribute:: level <key>

            Level to which configuration applies
            **type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: interval

            Seconds
            **type**\: int
            **range:** 0..65535
            **mandatory**\: True
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None     # list key
            self.interval = None  # seconds, 0..65535

        @property
        def _common_path(self):
            # Path needs the parent container plus the level key.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:csnp-interval[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Entry has data as soon as the key or the leaf is set.
            if not self.is_config():
                return False
            if self.level is not None:
                return True
            if self.interval is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            # Lazy import avoids a circular dependency with the meta tables.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals.CsnpInterval']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:csnp-intervals'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Container has data when any child list entry does.
        if not self.is_config():
            return False
        if self.csnp_interval is not None:
            for child_ref in self.csnp_interval:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.CsnpIntervals']['meta_info']
class LspIntervals(object):
    """
    LSP\-interval configuration.

    .. attribute:: lsp_interval

        Interval between transmission of LSPs on interface
        **type**\: list of :py:class:`LspInterval <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_cfg.Isis.Instances.Instance.Interfaces.Interface.LspIntervals.LspInterval>`
    """

    # YANG module identity used for path construction.
    _prefix = 'clns-isis-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list of per-level LSP interval entries (YDK list wiring).
        self.lsp_interval = YList()
        self.lsp_interval.parent = self
        self.lsp_interval.name = 'lsp_interval'


    class LspInterval(object):
        """
        Interval between transmission of LSPs on interface.

        .. attribute:: level <key>

            Level to which configuration applies
            **type**\: :py:class:`IsisInternalLevelEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_clns_isis_datatypes.IsisInternalLevelEnum>`

        .. attribute:: interval

            Milliseconds
            **type**\: int
            **range:** 1..4294967295
            **mandatory**\: True
        """

        _prefix = 'clns-isis-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.level = None     # list key
            self.interval = None  # milliseconds, 1..4294967295

        @property
        def _common_path(self):
            # Path needs the parent container plus the level key.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.level is None:
                raise YPYModelError('Key property level is None')
            return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:lsp-interval[Cisco-IOS-XR-clns-isis-cfg:level = ' + str(self.level) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Entry has data as soon as the key or the leaf is set.
            if not self.is_config():
                return False
            if self.level is not None:
                return True
            if self.interval is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            # Lazy import avoids a circular dependency with the meta tables.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
            return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.LspIntervals.LspInterval']['meta_info']

    @property
    def _common_path(self):
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-clns-isis-cfg:lsp-intervals'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Container has data when any child list entry does.
        if not self.is_config():
            return False
        if self.lsp_interval is not None:
            for child_ref in self.lsp_interval:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
        return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface.LspIntervals']['meta_info']
@property
def _common_path(self):
    """XPath of this interface list entry (keyed by interface-name)."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.interface_name is None:
        raise YPYModelError('Key property interface_name is None')
    ns = 'Cisco-IOS-XR-clns-isis-cfg'
    return '%s/%s:interface[%s:interface-name = %s]' % (
        self.parent._common_path, ns, ns, str(self.interface_name))
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated configuration model: always represents config data.
    return True
def _has_data(self):
    """True when the interface key, any leaf, or any child container holds data."""
    if not self.is_config():
        return False
    # Plain leaves count as data as soon as they are set.
    leaves = (
        self.interface_name, self.circuit_type, self.link_down_fast_detect,
        self.mesh_group, self.point_to_point, self.running, self.state,
    )
    if any(leaf is not None for leaf in leaves):
        return True
    # Child containers count only when they themselves report data.
    containers = (
        self.bfd, self.csnp_intervals, self.hello_accept_passwords,
        self.hello_intervals, self.hello_multipliers, self.hello_paddings,
        self.hello_passwords, self.interface_afs, self.lsp_fast_flood_thresholds,
        self.lsp_intervals, self.lsp_retransmit_intervals,
        self.lsp_retransmit_throttle_intervals, self.priorities,
    )
    return any(c is not None and c._has_data() for c in containers)
@staticmethod
def _meta_info():
    # Lazy import avoids a circular dependency with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
    """XPath of the interfaces container under its parent instance."""
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-clns-isis-cfg:interfaces' % self.parent._common_path
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated configuration model: always represents config data.
    return True
def _has_data(self):
    """True when any interface list entry holds data."""
    if not self.is_config():
        return False
    for entry in self.interface or ():
        if entry._has_data():
            return True
    return False
@staticmethod
def _meta_info():
    # Lazy import avoids a circular dependency with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance.Interfaces']['meta_info']
@property
def _common_path(self):
    """Absolute XPath of this instance entry (top-level list; no parent needed)."""
    if self.instance_name is None:
        raise YPYModelError('Key property instance_name is None')
    ns = 'Cisco-IOS-XR-clns-isis-cfg'
    return '/%s:isis/%s:instances/%s:instance[%s:instance-name = %s]' % (
        ns, ns, ns, ns, str(self.instance_name))
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated configuration model: always represents config data.
    return True
def _has_data(self):
    """True when the instance key, any leaf, or any child container holds data."""
    if not self.is_config():
        return False
    # Plain leaves count as data as soon as they are set.
    leaves = (
        self.instance_name, self.dynamic_host_name, self.ignore_lsp_errors,
        self.is_type, self.log_adjacency_changes, self.log_pdu_drops,
        self.nsr, self.running,
    )
    if any(leaf is not None for leaf in leaves):
        return True
    # Child containers count only when they themselves report data.
    containers = (
        self.afs, self.distribute, self.interfaces, self.link_groups,
        self.lsp_accept_passwords, self.lsp_arrival_times,
        self.lsp_check_intervals, self.lsp_generation_intervals,
        self.lsp_lifetimes, self.lsp_mtus, self.lsp_passwords,
        self.lsp_refresh_intervals, self.max_link_metrics, self.nets,
        self.nsf, self.overload_bits, self.srgb, self.trace_buffer_size,
    )
    return any(c is not None and c._has_data() for c in containers)
@staticmethod
def _meta_info():
    # Lazy import avoids a circular dependency with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances.Instance']['meta_info']
@property
def _common_path(self):
    # Top-level container: absolute path, no parent required.
    return '/Cisco-IOS-XR-clns-isis-cfg:isis/Cisco-IOS-XR-clns-isis-cfg:instances'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated configuration model: always represents config data.
    return True
def _has_data(self):
    """True when any instance list entry holds data."""
    if not self.is_config():
        return False
    entries = self.instance if self.instance is not None else ()
    return any(entry._has_data() for entry in entries)
@staticmethod
def _meta_info():
    # Lazy import avoids a circular dependency with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis.Instances']['meta_info']
@property
def _common_path(self):
    # Root of the module: absolute path.
    return '/Cisco-IOS-XR-clns-isis-cfg:isis'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated configuration model: always represents config data.
    return True
def _has_data(self):
    """True when the instances container holds data."""
    if not self.is_config():
        return False
    return self.instances is not None and self.instances._has_data()
@staticmethod
def _meta_info():
    # Lazy import avoids a circular dependency with the meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_cfg as meta
    return meta._meta_table['Isis']['meta_info']
| 0.012164 |
import ST_Reader
import flask
# Shared GTFS reader instance and the WSGI application object.
# NOTE(review): assumes ST_Reader.Reader is safe to share across requests —
# confirm it is read-only or internally synchronized.
r = ST_Reader.Reader()
application = flask.Flask(__name__)
@application.route('/')
def hello():
    """Landing page: plain-text greeting confirming the API is up."""
    greeting = "Welcome to ST API"
    return greeting
@application.route('/routes', methods=['GET'])
def get_routes():
    """List all routes plus the trip-update feed's last-update timestamp."""
    payload = {'data': r.get_routes(),
               'update': r.get_last_update('trip_updates')}
    return flask.jsonify(payload)
@application.route('/<route_id>/stops', methods=['GET'])
def get_stops(route_id):
    """List the stops served by one route."""
    payload = {'data': r.get_stops([route_id]),
               'update': r.get_last_update('trip_updates')}
    return flask.jsonify(payload)
@application.route('/<stop_id>/times', methods=['GET'])
def get_stop_times(stop_id):
    """List all arrival/departure times for one stop."""
    payload = {'data': r.get_stop_times([stop_id]),
               'update': r.get_last_update('trip_updates')}
    return flask.jsonify(payload)
@application.route('/<stop_id>/times/next', methods=['GET'])
def get_next_stop_time(stop_id):
    """Return only the next upcoming time for one stop."""
    payload = {'data': r.get_next_stop_time(stop_id),
               'update': r.get_last_update('trip_updates')}
    return flask.jsonify(payload)
@application.route('/<stop_id>/name', methods=['GET'])
def get_stop_name(stop_id):
    """Resolve a stop id to its human-readable name (static 'stops' feed)."""
    payload = {'data': r.get_stop_name(stop_id),
               'update': r.get_last_update('stops')}
    return flask.jsonify(payload)
if __name__ == '__main__':
    # Development entry point; in production serve `application` via WSGI.
    application.run()
| 0.017528 |
"""Create a development and testing environment using a virtualenv."""
from __future__ import unicode_literals
import os
import subprocess
import sys
# Python 3 runs use a separate virtualenv directory and need django-nose
# patched via 2to3 until the project supports Python 3 natively (bug #16).
if sys.version_info[0] >= 3:
    VENV_NAME = '.venv3'
    # FIXME: running 2to3 on django-nose will no longer be required once
    # the project supports Python 3 (bug #16).
    PATCH_DJANGO_NOSE = True
else:
    VENV_NAME = '.venv'
    PATCH_DJANGO_NOSE = False
# Paths resolved relative to this file so the script works from any CWD.
TESTS = os.path.abspath(os.path.dirname(__file__))
REQUIREMENTS = os.path.join(TESTS, 'requirements.pip')
WITH_VENV = os.path.join(TESTS, 'with_venv.sh')
VENV = os.path.abspath(os.path.join(TESTS, '..', VENV_NAME))
def call(*args):
    """Run *args* as a subprocess; abort the script if it exits non-zero."""
    status = subprocess.call(args)
    if status:
        raise SystemExit('Error running {0}.'.format(args))
def pip_install(*args):
    """Install packages with pip, executed inside the virtualenv wrapper."""
    base = (WITH_VENV, VENV_NAME, 'pip', 'install', '--use-mirrors')
    call(*(base + args))
def patch_django_nose():
    """Run 2to3 on django-nose and strip ``import new`` from its runner.

    FIXME: delete once django-nose supports Python 3 (bug #16).
    """
    python_dir = 'python{0}.{1}'.format(*sys.version_info[:2])
    django_nose = os.path.join(
        VENV, 'lib', python_dir, 'site-packages', 'django_nose')
    call('2to3', '-w', '--no-diffs', django_nose)
    runner_path = os.path.join(django_nose, 'runner.py')
    with open(runner_path, 'r+') as runner:
        # Drop the Python-2-only ``import new`` line, rewrite in place.
        kept = [line for line in runner.readlines() if 'import new' not in line]
        runner.seek(0)
        runner.truncate()
        runner.writelines(kept)
if __name__ == '__main__':
    # Create the virtualenv with the same interpreter running this script.
    call('virtualenv', '--distribute', '-p', sys.executable, VENV)
    pip_install('-r', REQUIREMENTS)
    # FIXME: delete from now on once django-nose supports Python 3 (bug #16).
    if PATCH_DJANGO_NOSE:
        patch_django_nose()
| 0 |
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import confluent.discovery.handlers.bmc as bmchandler
import pyghmi.exceptions as pygexc
import pyghmi.ipmi.private.util as pygutil
import confluent.util as util
import struct
class NodeHandler(bmchandler.NodeHandler):
    """Discovery handler for Lenovo IMM BMCs announced over SLP."""
    devname = 'IMM'

    @classmethod
    def adequate(cls, info):
        # We can sometimes receive a partially initialized SLP packet
        # This is not adequate for being satisfied
        return bool(info.get('attributes', {}))

    def scan(self):
        """Populate self.info from the SLP attribute dump (no extra I/O)."""
        slpattrs = self.info.get('attributes', {})
        self.isdense = False
        try:
            ff = slpattrs.get('enclosure-form-factor', [''])[0]
        except IndexError:
            # SLP reported an empty value list for the form factor; bail out.
            return
        wronguuid = slpattrs.get('node-uuid', [''])[0]
        if wronguuid:
            # we need to fix the first three portions of the uuid
            # (they arrive little-endian; repack to canonical byte order)
            uuidprefix = wronguuid.split('-')[:3]
            uuidprefix = codecs.encode(struct.pack(
                '<IHH', *[int(x, 16) for x in uuidprefix]), 'hex')
            uuidprefix = util.stringify(uuidprefix)
            uuidprefix = uuidprefix[:8] + '-' + uuidprefix[8:12] + '-' + \
                uuidprefix[12:16]
            self.info['uuid'] = uuidprefix + '-' + '-'.join(
                wronguuid.split('-')[3:])
            self.info['uuid'] = self.info['uuid'].lower()
        # Optional location metadata, copied only when SLP provided it.
        room = slpattrs.get('room-id', [None])[0]
        if room:
            self.info['room'] = room
        rack = slpattrs.get('rack-id', [None])[0]
        if rack:
            self.info['rack'] = rack
        name = slpattrs.get('name', [None])[0]
        if name:
            self.info['hostname'] = name
        unumber = slpattrs.get('lowest-u', [None])[0]
        if unumber:
            self.info['u'] = unumber
        location = slpattrs.get('location', [None])[0]
        if location:
            self.info['location'] = location
        if ff not in ('dense-computing', 'BC2'):
            # do not probe unless it's a dense platform
            return
        self.isdense = True
        encuuid = slpattrs.get('chassis-uuid', [None])[0]
        if encuuid:
            self.info['enclosure.uuid'] = encuuid
        slot = int(slpattrs.get('slot', ['0'])[0])
        if slot != 0:
            self.info['enclosure.bay'] = slot

    def probe(self):
        """Fill in enclosure data for dense platforms, via IPMI when SLP lacked it."""
        if self.info.get('enclosure.bay', 0) == 0:
            self.scan()
        if self.info.get('enclosure.bay', 0) != 0:
            # scan has already populated info
            return
        ff = self.info.get('attributes', {}).get('enclosure-form-factor', '')
        # NOTE(review): scan() also treats 'BC2' as dense, but probe() only
        # continues for 'dense-computing' — confirm this asymmetry is intended.
        if ff != 'dense-computing':
            return
        try:
            # we are a dense platform, but the SLP data did not give us slot
            # attempt to probe using IPMI
            ipmicmd = self._get_ipmicmd()
            guiddata = ipmicmd.xraw_command(netfn=6, command=8)
            self.info['uuid'] = pygutil.decode_wireformat_uuid(
                guiddata['data']).lower()
            ipmicmd.oem_init()
            bayid = ipmicmd._oem.immhandler.get_property(
                '/v2/cmm/sp/7')
            if not bayid:
                return
            self.info['enclosure.bay'] = int(bayid)
            smmid = ipmicmd._oem.immhandler.get_property(
                '/v2/ibmc/smm/chassis/uuid')
            if not smmid:
                return
            # Normalize the SMM UUID into dashed 8-4-4-4-12 form.
            smmid = smmid.lower().replace(' ', '')
            smmid = '{0}-{1}-{2}-{3}-{4}'.format(smmid[:8], smmid[8:12],
                                                 smmid[12:16], smmid[16:20],
                                                 smmid[20:])
            self.info['enclosure.uuid'] = smmid
            self.info['enclosure.type'] = 'smm'
        except pygexc.IpmiException as ie:
            # NOTE(review): stray debug print before re-raise; consider using
            # the project's logging facility instead.
            print(repr(ie))
            raise
| 0.000687 |
#!/usr/bin/env python
# coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import functools
import sys
import cyclone.redis
import cyclone.web
from twisted.python import log
from twisted.internet import defer, reactor
class Application(cyclone.web.Application):
    """Cyclone application wiring the index handler to a shared Redis pool."""

    def __init__(self):
        # Defaults to localhost:6379, dbid=0
        redisdb = cyclone.redis.lazyConnectionPool()
        routes = [
            (r"/", IndexHandler, dict(redisdb=redisdb)),
        ]
        cyclone.web.Application.__init__(self, routes, debug=True)
def HTTPBasic(method):
    """Decorator enforcing HTTP Basic auth against Redis.

    The password for user ``<usr>`` is looked up under the ``cyclone:<usr>``
    key; on success the username is stored as ``self._current_user`` before
    the wrapped handler method runs.
    """
    @defer.inlineCallbacks
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            auth_type, auth_data = \
                self.request.headers["Authorization"].split()
            assert auth_type == "Basic"
            usr, pwd = base64.b64decode(auth_data).split(":", 1)
        except:
            # Deliberate bare except: any parse failure (missing/malformed
            # header, bad base64) simply means "not authenticated".
            raise cyclone.web.HTTPAuthenticationRequired
        try:
            redis_pwd = yield self.redisdb.get("cyclone:%s" % usr)
        except Exception, e:
            log.msg("Redis failed to get('cyclone:%s'): %s" % (usr, str(e)))
            raise cyclone.web.HTTPError(503)  # Service Unavailable
        if pwd != str(redis_pwd):
            raise cyclone.web.HTTPAuthenticationRequired
        else:
            self._current_user = usr
        defer.returnValue(method(self, *args, **kwargs))
    return wrapper
class IndexHandler(cyclone.web.RequestHandler):
    """Single demo endpoint; greets the HTTP-Basic-authenticated user."""

    def initialize(self, redisdb):
        # Injected via the Application's route spec.
        self.redisdb = redisdb

    @HTTPBasic
    def get(self):
        self.write("Hi, %s." % self._current_user)
def main():
    """Start the demo server on 127.0.0.1:8888 with logging to stdout."""
    log.startLogging(sys.stdout)
    log.msg(">>>> Set the password from command line: "
            "redis-cli set cyclone:root 123")
    log.msg(">>>> Then authenticate as root/123 from the browser")
    reactor.listenTCP(8888, Application(), interface="127.0.0.1")
    reactor.run()


if __name__ == "__main__":
    main()
| 0.000388 |
import calendar
import datetime
import re
import sys
import urllib
import urlparse
import unicodedata
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
# Matches a (possibly weak, W/-prefixed) quoted ETag, capturing its contents.
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
# Building blocks for the three HTTP date formats of RFC 2616 section 3.3.1.
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned string
    can safely be used as part of an argument to a subsequent iri_to_uri() call
    without double-quoting occurring.
    """
    return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
# Wrapped so lazy translation objects are only quoted when rendered.
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
# Wrapped so lazy translation objects are only quoted when rendered.
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlunquote(quoted_url):
    """
    A wrapper for Python's urllib.unquote() function that can operate on
    the result of django.utils.http.urlquote().
    """
    return force_unicode(urllib.unquote(smart_str(quoted_url)))
# Wrapped so lazy translation objects are only unquoted when rendered.
urlunquote = allow_lazy(urlunquote, unicode)
def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_unicode(urllib.unquote_plus(smart_str(quoted_url)))
# Wrapped so lazy translation objects are only unquoted when rendered.
urlunquote_plus = allow_lazy(urlunquote_plus, unicode)
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    if isinstance(query, MultiValueDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    pairs = []
    for k, v in query:
        if isinstance(v, (list, tuple)):
            # Encode each member of a sequence value individually. The old
            # ``and/or`` conditional fell through to smart_str(v) whenever the
            # encoded list was empty (emitting the literal string "[]"); a
            # real conditional keeps empty sequences as empty sequences.
            encoded = [smart_str(i) for i in v]
        else:
            encoded = smart_str(v)
        pairs.append((smart_str(k), encoded))
    return urllib.urlencode(pairs, doseq)
def cookie_date(epoch_seconds=None):
    """
    Formats the time to ensure compatibility with Netscape's cookie standard.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    stamp = formatdate(epoch_seconds)
    # Rearrange "Wdy, DD Mon YYYY HH:MM:SS -0000" into the cookie layout.
    return '{0}-{1}-{2} GMT'.format(stamp[:7], stamp[8:11], stamp[12:25])
def http_date(epoch_seconds=None):
    """
    Formats the time to match the RFC1123 date format as specified by HTTP
    RFC2616 section 3.3.1.

    Accepts a floating point number expressed in seconds since the epoch, in
    UTC - such as that outputted by time.time(). If set to None, defaults to
    the current time.

    Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    # formatdate() ends with "-0000"; swap that zone marker for "GMT".
    return formatdate(epoch_seconds)[:25] + ' GMT'
def parse_http_date(date):
    """
    Parses a date format as specified by HTTP RFC2616 section 3.3.1.

    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.

    Returns a floating point number expressed in seconds since the epoch, in
    UTC.
    """
    # email's parsedate handles RFC1123; RFC2616 also mandates RFC850 and
    # asctime dates, so match all three formats ourselves.
    for pattern in (RFC1123_DATE, RFC850_DATE, ASCTIME_DATE):
        matched = pattern.match(date)
        if matched is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        year = int(matched.group('year'))
        if year < 100:
            # Two-digit years: 70-99 -> 19xx, 00-69 -> 20xx.
            year += 2000 if year < 70 else 1900
        result = datetime.datetime(
            year,
            MONTHS.index(matched.group('mon').lower()) + 1,
            int(matched.group('day')),
            int(matched.group('hour')),
            int(matched.group('min')),
            int(matched.group('sec')),
        )
        return calendar.timegm(result.utctimetuple())
    except Exception:
        raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
    """
    Same as parse_http_date, but returns None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        return None
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
    input won't fit into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is long than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    value = int(s, 36)
    # ... then do a final check that the value will fit into an int.
    # (sys.maxint is Python-2-only; this module targets Python 2.)
    if value > sys.maxint:
        raise ValueError("Base36 input too large")
    return value
def int_to_base36(i):
    """
    Converts an integer to a base36 string
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    # sys.maxint bound mirrors base36_to_int (Python-2-only attribute).
    if not 0 <= i <= sys.maxint:
        raise ValueError("Base36 conversion input too large or incorrect type.")
    # Peel digits off the low end, then reverse; handles zero naturally.
    encoded = []
    while True:
        i, rem = divmod(i, 36)
        encoded.append(digits[rem])
        if not i:
            break
    return ''.join(reversed(encoded))
def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    # Undo backslash escaping inside the quotes ('string_escape' is a
    # Python-2-only codec; this module targets Python 2).
    etags = [e.decode('string_escape') for e in etags]
    return etags
def quote_etag(etag):
    """
    Wraps a string in double quotes escaping contents as necessary.
    """
    escaped = etag.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
# urlparse results grew named attributes in Python 2.6; use the clearer
# implementation when they are available.
if sys.version_info >= (2, 6):
    def same_origin(url1, url2):
        """
        Checks if two URLs are 'same-origin'
        """
        p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
        return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
else:
    # Python 2.5 compatibility. This actually works for Python 2.6 and above,
    # but the above definition is much more obviously correct and so is
    # preferred going forward.
    def same_origin(url1, url2):
        """
        Checks if two URLs are 'same-origin'
        """
        # Compares (scheme, netloc); ports are compared as part of netloc.
        p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
        return p1[0:2] == p2[0:2]
def is_safe_url(url, host=None):
    """
    Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
    a different host and uses a safe scheme).

    Always returns ``False`` on an empty url.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    # Chrome treats \ completely as /
    url = url.replace('\\', '/')
    # Chrome considers any URL with more than two slashes to be absolute, but
    # urlparse is not so flexible. Treat any url with three slashes as unsafe.
    if url.startswith('///'):
        return False
    url_info = urlparse.urlparse(url)
    # Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but, a path component. However,
    # Chrome will still consider example.com to be the hostname, so we must not
    # allow this syntax.
    if not url_info[1] and url_info[0]:
        return False
    # Forbid URLs that start with control characters. Some browsers (like
    # Chrome) ignore quite a few control characters at the start of a
    # URL and might consider the URL as scheme relative.
    if unicodedata.category(unicode(url[0]))[0] == 'C':
        return False
    # Relative URLs (no netloc and no scheme) are safe; otherwise the host
    # must match and the scheme must be plain http(s).
    return (not url_info[1] or url_info[1] == host) and \
        (not url_info[0] or url_info[0] in ['http', 'https'])
| 0.00281 |
# encoding: utf-8
"""
corduroy.couchdb
Basic python mapping of the CouchDB HTTP api.
"""
import os, re
import mimetypes
from urlparse import urlsplit, urlunsplit
from .io import Resource, ChangesFeed, quote, urlencode, is_relaxed
from .exceptions import HTTPError, PreconditionFailed, NotFound, ServerError, Unauthorized, \
Conflict, ConflictResolution
from .atoms import View, Row, Document, Status, adict, odict
from .config import defaults, json
# Public API: only the server and database facades are exported.
__all__ = ['Couch', 'Database']
def NOOP(*args):
    """Identity passthrough: return the arguments unchanged (default callback)."""
    return args
class Couch(object):
"""Represents a CouchDB server.
Useful for creating/deleting DBs and dealing with system-level functionality such
as replication and task monitoring."""
def __init__(self, url=None, auth=None, full_commit=True):
    """Initialize the server object.

    Args:
        url (str): url of the couchdb server, or an existing Resource object

    Kwargs:
        auth (tuple): login information. e.g., ('username', 'password')
        full_commit (bool): include the X-Couch-Full-Commit header
    """
    if url is None or isinstance(url, basestring):
        self.resource = Resource(url, auth=auth)
    else:
        self.resource = url # treat as a Resource object
    if not full_commit:
        self.resource.headers['X-Couch-Full-Commit'] = 'false'
def __contains__(self, name):
    """Return whether the server contains a database with the specified
    name. (synchronous)
    """
    try:
        self.resource.head(validate_dbname(name))
    except NotFound:
        return False
    return True
def __iter__(self):
    """Iterate over the names of all databases. (synchronous)"""
    return iter(self.resource.get_json('_all_dbs'))
def __len__(self):
    """Return the number of databases. (synchronous)"""
    return len(self.resource.get_json('_all_dbs'))
def __nonzero__(self):
    """Return whether the server is available. (synchronous)"""
    # Python 2 truthiness hook. The bare except is deliberate: any failure
    # (connection refused, timeout, auth error, ...) means "not available".
    try:
        self.resource.head()
        return True
    except:
        return False
def __repr__(self):
    """Debug representation showing the server URL."""
    return '<{0} {1!r}>'.format(type(self).__name__, self.resource.url)
def __delitem__(self, name):
    """Remove the database with the specified name. (synchronous)"""
    # Validation raises before any request is made for illegal names.
    self.resource.delete_json(validate_dbname(name))
def __getitem__(self, name):
    """Return a `Database` object representing the database with the
    specified name. (synchronous)"""
    database = Database(self.resource(name))
    database.resource.head()  # probe the server so a missing DB raises NotFound
    return database
def config(self, name=None, value=None, delete=False, callback=None):
"""Get/set the configuration of the CouchDB server (or a field thereof).
Args:
name (str): optional path to a sub-field of the config dict
value (str,dict,int): optional new value for the path specified by `name`
Kwargs:
delete (bool): if true, delete the path speficied by `name`
Returns:
When called without a `name` arg, returns the entire config dictionary.
When `name` is specified, returns the value of that sub-path
When `value` is specified, returns None
When `delete` is specified, returns the pre-deletion value
"""
if delete:
assert(value is None)
if not name:
resource = self.resource('_config')
else:
resource = self.resource('_config', *name.split('/'))
if not value and delete is True:
return resource.delete_json(callback=callback)
if value:
return resource.put_json(body=json.encode(value), callback=callback)
else:
return resource.get_json(callback=callback)
def version(self, callback=None):
"""The version string of the CouchDB server.
"""
def postproc(data, status):
if status.ok:
data = data['version']
return data, status
return self.resource.get_json(callback=callback, process=postproc)
def stats(self, name=None, callback=None):
"""Server statistics.
Args:
name (str): optional sub-path of stats dict to return
Returns:
When called without args, returns the entire stats dictionary
When `name` is specified, returns the value at that sub-path
Raises:
NotFound (when provided `name` is invalid)
"""
if not name:
resource = self.resource('_stats')
else:
resource = self.resource('_stats', *name.split('/'))
return resource.get_json(callback=callback)
def tasks(self, callback=None):
"""A list of tasks currently active on the server."""
return self.resource.get_json('_active_tasks', callback=callback)
def uuids(self, count=1, callback=None):
"""Retrieve a batch of uuids
Args:
count (int): optional number of uuids to retrieve
Returns:
list. A list of uuid strings of length=count
"""
def postproc(data, status):
if status.ok:
data = data['uuids']
return data, status
return self.resource.get_json('_uuids', process=postproc, callback=callback, count=count)
def db(self, name, create_if_missing=False, callback=None):
"""Initialize a Database object corrsponding to a particular db name.
Args:
name (str): The name of the database (without the server's url prefix)
create_if_missing (bool): If true, will handle NotFound errors by creating
the database specified by `name`
Kwargs:
create_if_missing (bool): if True, attempt to create the database if the
initial request results in a NotFound
Returns:
Database. An initialized Database object
Raises:
NotFound (when database does not exists and create_if_missing==False)
"""
_db = Database(self.resource(name))
def handle_missing(data, status):
if status.ok:
data = _db
elif status.error is NotFound and create_if_missing:
return self.create(name, callback=callback)
elif callback:
callback(data, status)
return data, status
if callback:
return _db.resource.get_json(callback=handle_missing)
else:
try:
return _db.resource.get_json(process=handle_missing)
except NotFound:
if not create_if_missing: raise
return self.create(name)
def all_dbs(self, callback=None):
"""Retrieve the list of database names on this server"""
return self.resource.get_json('_all_dbs', callback=callback)
def create(self, name, callback=None):
"""Create a new database with the given name.
Args:
name (str): The name of the database to create (without the server's url prefix)
Returns:
Database. An initialized Database object
Raises:
PreconditionFailed (if the database already exists)
"""
def postproc(data, status):
if status.ok:
db = Database(self.resource(name))
data = db
return data, status
return self.resource.put_json(validate_dbname(name), process=postproc, callback=callback)
def delete(self, name, callback=None):
"""Delete the database with the specified name.
Args:
name (str): The name of the database to delete
Raises:
NotFound (when database does not exists)
"""
return self.resource.delete_json(validate_dbname(name), callback=callback)
def replicate(self, source, target, callback=None, **options):
"""Replicate changes from the source database to the target database.
Args:
source (str, Database): either a full url to the source database (with authentication
provided inline) or an initialized Database object
target (str, Database): the url or Database object of the target db
Kwargs:
_id (str): an optional replication_id. If provided, a doc will be created in
the `_replicator` database for subsequent querying. If not provided, the
legacy `_replicate` API will be used instead.
cancel (bool): if true, cancel the replication
continuous (bool): if True, set the replication to be continuous
create_target (bool): if True, creates the target database
doc_ids (list): optional list of document IDs to be synchronized
proxy (str): optional address of a proxy server to use
"""
if hasattr(source,'resource'):
source = source.resource.auth_url
if hasattr(target,'resource'):
target = target.resource.auth_url
data = {'source': source, 'target': target}
data.update(options)
if '_id' in options:
return self.resource.post_json('_replicator', data, callback=callback)
else:
return self.resource.post_json('_replicate', data, callback=callback)
@property
def users(self):
"""The _users system database.
Returns:
Database. This property is a synonym for `self.db('_users')`
"""
return Database(self.resource('_users'))
@property
def replicator(self):
"""The _replicator system database
Returns:
Database. This property is a synonym for `self.db('_replicator')`
"""
return Database(self.resource('_replicator'))
class Database(object):
"""Represents a single DB on a couch server.
This is the primary class for interacting with documents, views, changes, et al."""
def __init__(self, name, auth=None):
"""Initialize the database object.
Args:
name (str): either a full url path to the database, or just a database name
(to which the host specified in corduroy.defaults will be prepended)
auth (tuple): optional login information. e.g., ('username', 'password')
"""
if isinstance(name, basestring):
self.resource = Resource(name, auth=auth)
elif isinstance(name, Resource):
self.resource = name
else:
raise ValueError('expected str, got %s'%type(name))
self.name = validate_dbname(self.resource.url.split('/')[-1], encoded=True)
self._uuids = []
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.name)
def __contains__(self, id):
"""Return whether the database contains a document with the specified
ID. (synchronous)
"""
try:
data, status = _doc_resource(self.resource, id).head()
return True
except NotFound:
return False
def __iter__(self):
"""Return the IDs of all documents in the database. (synchronous)"""
return iter([item.id for item in self.view('_all_docs')])
def __len__(self):
"""Return the number of documents in the database. (synchronous)"""
data = self.resource.get_json()
return data['doc_count']
def __nonzero__(self):
"""Return whether the database is available. (synchronous)"""
try:
self.resource.head()
return True
except:
return False
def __delitem__(self, id):
"""Remove the document with the specified ID from the database. (synchronous)
"""
resource = _doc_resource(self.resource, id)
resp = resource.head()
def postproc(data, status):
result = resource.delete_json(rev=status.headers['etag'].strip('"'))
return result, status
return resource.head(process=postproc)
def __getitem__(self, id):
"""Return the document with the specified ID. (synchronous)
"""
data = _doc_resource(self.resource, id).get_json()
return defaults.types.doc(data)
def __setitem__(self, id, content):
"""Create or update a document with the specified ID. (synchronous)
"""
# content.setdefault('_id', id)
content['_id'] = id
return self.save(content)
def exists(self, callback=None):
"""Check whether this Database object corresponds to an existing db on the server
Returns:
boolean
"""
def postproc(data, status):
data = status.error is not NotFound
return data, status
try:
return self.resource.head(process=postproc, callback=callback)
except NotFound:
return False
# def postproc(data, status):
# data = status.error is not NotFound
# return data, status
# data = self.resource.head(process=postproc, callback=callback)
    @property
    def _couch(self):
        """Creates an instance of the parent Couch object (here be dragons)"""
        # strip the final path segment (the db name) off this db's url to
        # recover the server url, preserving any inline auth credentials
        parts = urlsplit(self.resource.auth_url)
        path = "/".join(parts.path.split('/')[:-1])
        parts = list(parts)
        parts[2] = path  # index 2 of the urlsplit 5-tuple is the path component
        return Couch(urlunsplit(tuple(parts)))
    def push(self, target, callback=None, **options):
        """Initiate a replication from this database to a target url
        Args:
            target (str, Database): the target database
        Kwargs (c.f., Couch.replicate):
            _id (str): an optional replication_id. If provided, a doc will be created in
                the `_replicator` database for subsequent querying. If not provided, the
                legacy `_replicate` API will be used instead.
            cancel (bool): if true, cancel the replication
            continuous (bool): if True, set the replication to be continuous
            create_target (bool): if True, creates the target database
            doc_ids (list): optional list of document IDs to be synchronized
            proxy (str): optional address of a proxy server to use
        """
        # delegate to the parent Couch (derived from this db's url) with self as source
        return self._couch.replicate(self, target, callback=callback, **options)
    def pull(self, source, callback=None, **options):
        """Initiate a replication from a source url to this database
        Args:
            source (str, Database): the database from which to replicate
        Kwargs (c.f., Couch.replicate):
            _id (str): an optional replication_id. If provided, a doc will be created in
                the `_replicator` database for subsequent querying. If not provided, the
                legacy `_replicate` API will be used instead.
            cancel (bool): if true, cancel the replication
            continuous (bool): if True, set the replication to be continuous
            create_target (bool): if True, creates the target database
            doc_ids (list): optional list of document IDs to be synchronized
            proxy (str): optional address of a proxy server to use
        """
        # delegate to the parent Couch (derived from this db's url) with self as target
        return self._couch.replicate(source, self, callback=callback, **options)
def cleanup(self, callback=None):
"""Clean up old design document indexes.
Returns:
dict of the form `{ok:True}`
"""
def postproc(data, status):
if status.ok:
data=data['ok']
return data, status
headers = {'Content-Type': 'application/json'}
return self.resource('_view_cleanup').post_json(headers=headers, process=postproc, callback=callback)
    def commit(self, callback=None):
        """If the server is configured to delay commits, or previous requests
        used the special ``X-Couch-Full-Commit: false`` header to disable
        immediate commits, this method can be used to ensure that any
        non-committed changes are committed to physical storage.
        """
        # an empty POST still needs an explicit json content-type
        headers={'Content-Type': 'application/json'}
        return self.resource.post_json('_ensure_full_commit', headers=headers, callback=callback)
def compact(self, ddoc=None, callback=None):
"""Compact the database or a design document's index.
Args:
ddoc (str): optional design doc name
"""
if ddoc:
resource = self.resource('_compact', ddoc)
else:
resource = self.resource('_compact')
headers={'Content-Type': 'application/json'}
def postproc(data, status):
if status.ok: # do i need the error checking here? for async maybe? o_O
data=data['ok']
return data, status
return resource.post_json(headers=headers, process=postproc, callback=callback)
    def security(self, value=None, callback=None):
        """Access the database's _security object
        If called without arguments, returns the security object. If called with a
        dict, sets the security object to that value.
        Args:
            value (dict): the new security object to be saved to the database
        Returns:
            dict. The current security object.
        """
        if hasattr(value,'items'):
            # setter: PUT the new object, then echo `value` back on success
            def postproc(data, status):
                if status.ok:
                    data=value
                return data, status
            headers = {'Content-Type': 'application/json'}
            return self.resource.put_json('_security', body=value, headers=headers, process=postproc, callback=callback)
        else:
            # getter: plain GET of the _security object
            return self.resource.get_json('_security', callback=callback)
    def _bulk_get(self, doc_ids, include_docs=True, process=None, callback=None, **options):
        """Perform a bulk fetch of documents via _all_docs (here be dragons)"""
        # chain the caller's `process` hook after our own row-flattening step
        propterhoc = process or NOOP
        def posthoc(view, status):
            # pull the docs out of their rows and build a list of docs and Nones
            # depending on whether the fetch was successful
            data = view
            if status.ok:
                if include_docs:
                    docs = [row.doc for row in view]
                    # NOTE(review): `missing` is computed but never used -- dead tally?
                    missing = sum(1 for d in docs if d is None)
                    data = docs
                else:
                    # no docs requested: synthesize lightweight id/rev stubs
                    vals = [(row.key, row.value) for row in view]
                    missing = sum(1 for v in vals if 'error' in v[1])
                    doc_stubs = []
                    for doc_id, doc_val in vals:
                        doc_stubs.append(defaults.types.doc(
                            _id=doc_id, _rev=doc_val.get('rev'), _stub=True
                        ))
                    data = doc_stubs
            return propterhoc(data, status)
        if doc_ids is False:
            # using false here from the default value in .get (avoiding None in case a user variable thought
            # to be containing a string gets passed as the doc id. best not to do a full-db get in response...)
            options.setdefault('limit',50)
            return self.view('_all_docs', include_docs=include_docs, process=posthoc, callback=callback, **options)
        return self.view('_all_docs',keys=doc_ids, include_docs=include_docs, process=posthoc, callback=callback, **options)
    def get(self, id_or_ids=False, callback=None, **options):
        """Return the document(s) with the specified ID(s).
        Args:
            id_or_ids (str, list): either a single ID or a list of IDs to fetch in a bulk request
        Kwargs:
            rev (str): if present, specifies a specific revision to retrieve
            revs (bool): if true, add a _revs attribute to the returned doc
            revs_info (bool): if true, add a _revs_info attribute to the returned doc
            When called with a list of IDs, all standard view options can be applied
            (see Database.view for a complete listing).
        Returns:
            If a single ID is requested, returns a corresponding Document or raises an
            exception if the doc does not exist.
            If a list of IDs was requested, returns a corresponding list of Document objects.
            Requesting IDs referencing non-existent docs will not raise an exception but will
            place a corresponding None in the list of returned docs.
        Raises:
            NotFound (when a single ID is requested and no corresponding doc is found)
        """
        # anything that isn't a string (a list, or the False default meaning
        # "no ids given") goes through the bulk _all_docs path
        if not isinstance(id_or_ids, basestring):
            return self._bulk_get(id_or_ids, callback=callback, **options)
        def postproc(data, status):
            # wrap raw json in the configured document type(s)
            if status.ok:
                if isinstance(data, (list,tuple)):
                    data = [defaults.types.doc(d) for d in data]
                else:
                    data = defaults.types.doc(data)
            return data, status
        return _doc_resource(self.resource, id_or_ids).get_json(process=postproc, callback=callback, **options)
    def _solo_save(self, doc, force=False, merge=None, callback=None, **options):
        """Perform a single-document update (here be dragons)"""
        # PUT to the doc's url when it has an _id, otherwise POST to the db
        # and let the server assign one
        if '_id' in doc:
            put_or_post = _doc_resource(self.resource, doc['_id']).put_json
        else:
            put_or_post = self.resource.post_json
        def soloproc(data, status):
            if status.ok:
                # sync the caller's dict with the server-assigned id/rev
                id, rev = data['id'], data.get('rev')
                doc['_id'] = id
                if rev is not None: # Not present for batch='ok'
                    doc['_rev'] = rev
                data = ConflictResolution(self, [data], [doc])
            else:
                # wrap the failure so force/merge can retry the write
                conflicts = ConflictResolution(self, [status.response], [doc])
                if force:
                    return conflicts.overwrite(callback=callback), status
                elif merge:
                    return conflicts.merge(merge, callback=callback), status
                data = conflicts
                if status.error is Conflict:
                    status.exception = data
                if callback:
                    callback(data, status)
            return data, status
        if callback:
            return put_or_post(body=doc, callback=soloproc, **options)
        else:
            return put_or_post(body=doc, process=soloproc, **options)
    def _bulk_save(self, docs, force=False, merge=None, callback=None, **options):
        """Perform a multi-document update via _bulk_docs (here be dragons)"""
        # normalize dict-likes into plain dicts for serialization
        to_post = []
        for doc in docs:
            if isinstance(doc, dict):
                to_post.append(doc)
            elif hasattr(doc, 'items'):
                to_post.append(dict(doc.items()))
            else:
                raise TypeError('expected dict, got %s' % type(doc))
        def bulkproc(data, status):
            handle_remaining = callback or NOOP
            if status.ok:
                conflicts = ConflictResolution(self, data, docs)
                data = conflicts
                if conflicts.pending:
                    # some docs conflicted; optionally retry them
                    if force:
                        return conflicts.overwrite(callback=callback), status
                    elif merge:
                        return conflicts.merge(merge,callback=callback), status
            # no conflicts, returning after the single round-trip
            return handle_remaining(data, status)
        content = dict(docs=to_post)
        content.update(options)
        # NOTE(review): options are folded into the POST body *and* passed as
        # query-string kwargs below -- confirm whether both are intended
        cb = proc = None
        if callback: cb = bulkproc
        else: proc = bulkproc
        return self.resource.post_json('_bulk_docs', body=content, process=proc, callback=cb, **options)
    def save(self, doc_or_docs=None, merge=None, force=False, callback=None, **options):
        """Create a new document or update an existing document.
        Args:
            doc_or_docs (dict, list): either a single dict-like object or a list of many
        Kwargs:
            batch (str): if equal to "ok" when submitting a single document, the server will
                defer writing to disk (speeding throughput but risking data loss)
            all_or_nothing (boolean): if True when submitting a list of documents, conflict
                checking will be disabled and either the entire list of docs will be written
                to the database or none will (see couchdb docs for more details on the not-
                entirely-intuitive semantics).
            force (bool): if True, will retry any writes that caused a conflict after fetching
                the current _revs from the server. This will not necessarily succeed if the database
                is write-heavy due to the race condition between retrieving the _rev and attempting
                the update. The use of force outside of a debugging context is highly discouraged.
            merge (function w/ signature ƒ(local_doc, server_doc)): If the inital update request
                causes any conflicts, the current server copy of each conflicting doc will be
                fetched and the `merge` function will be called for each local/remote pair. The
                merge function should return either a dict-like object to be written to the database
                or (in case the write attempt should be abandoned) None.
        Side Effects:
            All docs passed as arguments will have their _id and/or _rev updated to reflect a
            successful write. In addition, these up-to-date dicts can be found in the
            return value's `.resolved` property (useful in an async context where the callback
            function doesn't have the original arguments in its scope)
        Returns:
            ConflictResolution. An object with two attributes of interest:
              * pending: a dictionary (keyed by _id) of docs which were not successfully
                written due to conflicts
              * resolved: a dictionary (keyed by _id) of docs which were successfully written
        Raises:
            Conflict.
            As with the Database.get method, exceptions will be raised in the single-doc case,
            but when dealing with a list of docs, errors will be signaled on a doc-by-doc basis
            in the return value.
        """
        # if we're being called from a generator, create a Task and then re-call ourselves
        # using its callback
        if not callback and is_relaxed():
            from tornado import gen
            def multipass(callback):
                gen_callback = callback # supplied by gen.Task
                def unpack_results(data, status):
                    gen_callback(data) if status.ok else gen_callback(status.exception)
                self.save(doc_or_docs, merge=merge, force=force, callback=unpack_results, **options)
            return gen.Task(multipass)
        # look for missing _id fields and pick the solo vs bulk code path
        orphans = []
        _save = None
        _couch = self._couch
        if isinstance(doc_or_docs, (list, tuple)):
            orphans = [doc for doc in doc_or_docs if '_id' not in doc]
            _save = self._bulk_save
        elif hasattr(doc_or_docs,'items'):
            orphans = [doc_or_docs] if '_id' not in doc_or_docs else []
            _save = self._solo_save
        else:
            raise TypeError('expected dict or list, got %s' % type(doc_or_docs))
        # raise an exception if the docs arg isn't serializeable, would be nice to
        # know if this is as wasteful as it feels...
        json.encode(doc_or_docs)
        # fill in missing _ids with cached/fetched uuids then proceed with the save
        if len(orphans) > len(self._uuids):
            # not enough cached uuids: fetch a fresh batch from the server first
            def decorate_uuids(data, status):
                if status.ok:
                    self._uuids.extend(data['uuids'])
                    for doc, uuid in zip(orphans, self._uuids):
                        doc['_id'] = uuid
                    self._uuids = self._uuids[len(orphans):]
                    return _save(doc_or_docs, force=force, merge=merge, callback=callback, **options), status
                else:
                    return data, status
            cb = proc = None
            if callback: cb = decorate_uuids
            else: proc = decorate_uuids
            return _couch.resource.get_json('_uuids', callback=cb, process=proc, count=max(len(orphans), defaults.uuid_cache))
        else:
            # the cache can cover all the orphans; assign and save directly
            if orphans:
                for doc, uuid in zip(orphans, self._uuids):
                    doc['_id'] = uuid
                self._uuids = self._uuids[len(orphans):]
            return _save(doc_or_docs, force=force, merge=merge, callback=callback, **options)
    def copy(self, source, dest, callback=None):
        """Copy a given document to create a new document or overwrite an old one.
        Args:
            source (str, dict): either a string containing an ID or a dict with `_id` and `_rev` keys
                specifying the document to be copied
            dest (str, dict): either a string containing an ID or a dict with `_id` and `_rev` keys
                specifying the document to be created/overwritten
        Returns:
            str. The _rev of the destination document (the server's reply is
            reduced to its 'rev' field by the postproc below).
        Raises:
            Conflict (when dest doc already exists and an up-to-date _rev was not specified)
        """
        # reduce `source` to a bare ID string
        # NOTE(review): a _rev supplied on the source dict is ignored here --
        # only the destination's _rev is forwarded; confirm that's intended
        src = source
        if not isinstance(src, basestring):
            if not isinstance(src, dict):
                if hasattr(src, 'items'):
                    src = dict(src.items())
                else:
                    raise TypeError('expected dict or string, got %s' %
                                    type(src))
            src = src['_id']
        # reduce `dest` to an ID (with ?rev=... appended when a _rev is known)
        if not isinstance(dest, basestring):
            if not isinstance(dest, dict):
                if hasattr(dest, 'items'):
                    dest = dict(dest.items())
                else:
                    raise TypeError('expected dict or string, got %s' %
                                    type(dest))
            if '_rev' in dest:
                dest = '%s?%s' % (quote(dest['_id']),
                                  urlencode({'rev': dest['_rev']}))
            else:
                dest = quote(dest['_id'])
        def postproc(data, status):
            if status.ok:
                data = data['rev']
            return data, status
        return self.resource._request_json('COPY', src, headers={'Destination': dest},
                                           process=postproc, callback=callback)
def delete(self, doc, callback=None):
"""Delete the given document from the database.
Args:
doc (dict-like): an object with `_id` and `_rev` keys identifying the doc to be deleted
Returns:
dict: {ok:True, id:'', rev:''}
Raises:
NotFound (when doc['_id'] does not exist)
Conflict (when dest doc already exists and an up-to-date _rev was not specified)
"""
if doc['_id'] is None:
raise ValueError('document ID cannot be None')
headers={'Content-Type': 'application/json'}
# TODO *could* have it return the doc but with _deleted=True appended...
return _doc_resource(self.resource, doc['_id']).delete_json(rev=doc['_rev'], headers=headers, callback=callback)
def revisions(self, id, callback=None, **options):
"""Return all available revisions of the given document.
Args:
id (str): ID of the doc whose revisions to fetch
Returns:
list. All prior _rev strings sorted reverse chronologically
Raises:
NotFound.
"""
def postproc(data, status):
if status.ok:
history = []
startrev = data['_revisions']['start']
for index, rev in enumerate(data['_revisions']['ids']):
history.append('%d-%s' % (startrev - index, rev))
data = history
return data, status
resource = _doc_resource(self.resource, id)
return resource.get_json(revs=True, process=postproc, callback=callback)
def info(self, ddoc=None, callback=None):
"""Fetch information about the database or a design document.
Args:
ddoc (str): optional design doc name
Returns:
dict. Equivalent to a GET on the database or ddoc's url
Raises:
NotFound
"""
if ddoc is not None:
return self.resource('_design', ddoc, '_info').get_json(callback=callback)
else:
def postproc(data, status):
self.name = data['db_name']
return data, status
return self.resource.get_json(process=postproc, callback=callback)
    def delete_attachment(self, doc, filename, callback=None):
        """Delete the specified attachment.
        Args:
            doc (dict-like): an object with `_id` and `_rev` keys
            filename (str): the name of the attachment to be deleted in the given doc
        Side Effects:
            Will update the doc argument's _rev value upon successfully deleting the attachment
        Returns:
            dict. The passed-in doc, with its `_rev` updated and the local
            attachment stub removed (see postproc below).
        Raises:
            NotFound, Conflict.
        """
        def postproc(data, status):
            if status.ok:
                rev = data.get('rev')
                if rev is not None:
                    doc['_rev'] = rev
                # drop the local stub; remove the _attachments dict entirely once empty
                del doc['_attachments'][filename]
                if not doc['_attachments'].keys():
                    del doc['_attachments']
                data = doc
            return data, status
        resource = _doc_resource(self.resource, doc['_id'])
        return resource.delete_json(filename, rev=doc['_rev'], process=postproc, callback=callback)
def get_attachment(self, id_or_doc, filename, callback=None):
"""Return an attachment from the specified doc and filename.
Args:
doc (str, dict-like): an ID string or dict with an `_id` key
filename (str): the name of the attachment to retrieve
Returns:
str. The raw attachment data as a bytestring
"""
if isinstance(id_or_doc, basestring):
_id = id_or_doc
else:
_id = id_or_doc['_id']
return _doc_resource(self.resource, _id).get(filename, callback=callback)
    def put_attachment(self, doc, content, filename=None, content_type=None, callback=None):
        """Create or replace an attachment.
        Args:
            doc (dict-like): an object with `_id` and `_rev` keys
            content (str, file): the attachment data
            filename (str): optionally specify the name to use (unnecessary w/ file objects)
            content_type (str): optionally specify the mime type to use (unnecessary w/ file objects)
        Side Effects:
            Will update the doc argument's _rev value upon successfully updating the attachment
        Returns:
            dict. The passed-in doc, with its `_rev` updated and a local
            attachment stub added (see postproc below).
        Raises:
            NotFound, Conflict
        """
        if filename is None:
            # file-like objects can supply their own name
            if hasattr(content, 'name'):
                filename = os.path.basename(content.name)
            else:
                raise ValueError('no filename specified for attachment')
        if content_type is None:
            # guess the mime type (plus encoding, if any) from the extension
            content_type = ';'.join(
                filter(None, mimetypes.guess_type(filename))
            )
        def postproc(data, status):
            if status.ok:
                # record the new rev and mirror the server-side stub locally
                doc['_rev'] = data['rev']
                _attch = doc.get('_attachments',adict())
                _attch[filename] = dict(content_type=content_type, stub=True, added=True)
                doc['_attachments'] = _attch
                data = doc
            return data, status
        resource = _doc_resource(self.resource, doc['_id'])
        headers={'Content-Type': content_type}
        return resource.put_json(filename, body=content, headers=headers, rev=doc['_rev'],
                                 process=postproc, callback=callback)
    def purge(self, docs, callback=None):
        """Perform purging (complete removal) of the given documents.
        Uses a single HTTP request to purge all given documents. Purged
        documents do not leave any metadata in the storage and are not
        replicated.
        Think thrice before doing this.
        Args:
            docs (list): containing dicts of the form `{_id:'', _rev:''}`
        Returns:
            dict of the form `{purge_seq:0, purged:{id1:[], id2:[], ...}}`
        """
        content = {}
        # accept either a list of docs or a single dict-like (wrapped in a list)
        for doc in docs if not hasattr(docs, 'items') else [docs]:
            if isinstance(doc, dict):
                content[doc['_id']] = [doc['_rev']]
            elif hasattr(doc, 'items'):
                # dict-like but not a dict: normalize before reading keys
                doc = dict(doc.items())
                content[doc['_id']] = [doc['_rev']]
            else:
                raise TypeError('expected dict, got %s' % type(doc))
        return self.resource.post_json('_purge', body=content, callback=callback)
def show(self, name, id=None, callback=None, **options):
"""Call a 'show' function.
Args:
name (str): the show function to use (e.g., myddoc/atomfeed)
id (str): optional ID of the doc on which the show function will
be run
Returns:
object with two attributes of interest:
* headers: a dictionary of the response headers
* body: either a bytestring or a decoded json object (if the
response content type was application/json)
"""
path = _path_from_name(name, '_show')
if id:
path.append(id)
def postproc(data, status):
if status.ok:
if status.headers.get('content-type') == 'application/json':
body = json.decode(data)
data = adict(body=body, headers=status.headers)
return data, status
return self.resource(*path).get(callback=callback, **options)
    def list(self, name, view, callback=None, **options):
        """Format a view using a 'list' function.
        Args:
            name (str): the ddoc and list function name (e.g., myddoc/weekly)
            view (str): a view to run the list against. if the view is in
                the same ddoc as the list function, just its name can
                be passed (e.g., 'stats' instead of 'myddoc/stats').
                Otherwise the ddoc should be included in the view name.
        Returns:
            object with two attributes of interest:
              * headers: a dictionary of the response headers
              * body: either a bytestring or a decoded json object (if the response
                content type was application/json)
        """
        path = _path_from_name(name, '_list')
        # 'other_ddoc/viewname' splits into two extra path segments
        path.extend(view.split('/', 1))
        opts = _encode_view_options(options)
        # return a {body:, headers:} dict where body is either a string or (if
        # the content-type was json) a decoded dict
        def postproc(data, status):
            body = data
            if status.ok:
                if status.headers.get('content-type') == 'application/json':
                    body = json.decode(data)
                data = adict(body=body, headers=status.headers)
            return data, status
        return self.resource(*path).get(process=postproc, callback=callback, **opts)
    def update(self, name, id=None, body=None, callback=None, **options):
        """Calls a server side update handler.
        Args:
            name (str): the update-handler function name (e.g., myddoc/in_place_update)
            id (str): optionally specify the ID of a doc to update
        Kwargs:
            body (str, dict): optionally include data in the POST body. Dicts will
                be form-encoded and will appear in your update handler in the req.form
                field. Strings will be passed as-is and can be found in req.body.
            Other kwargs will be urlencoded and appended to the query string.
        """
        path = _path_from_name(name, '_update')
        # POST to the handler itself without an id; PUT to handler/id with one
        if id is None:
            func = self.resource(*path).post
        else:
            path.append(id)
            func = self.resource(*path).put
        headers = {}
        if hasattr(body, 'items'):
            # dict bodies are form-encoded (they show up in the handler's req.form)
            body = urlencode(body)
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        return func(callback=callback, body=body, headers=headers, **options)
    def changes(self, callback=None, **opts):
        """Retrieve a list of changes from the database or begin listening to
        a continuous feed.
        Kwargs:
            since (int):
                the earliest seq value that should be reported
            limit (int):
                maximum number of changes to return
            filter (str):
                name of a filter function (e.g., `myddoc/subset`)
            include_docs (bool):
                if true, each element of the 'results' array in the return
                value will also containt a 'doc' attribute
            feed (str):
                if 'continuous', a `ChangesFeed` object will be created and begin
                listening to the specified _changes feed.
            callback (function w/ signature ƒ(seq, changes)):
                the callback will be invoked repeatedly whenever new changes
                arrive. The seq argument is the integer value of the most recent
                seq in that batch. the changes argument is a list of dicts of
                the form: `[{seq:0, id:'', changes:[]}, …]`
            latency (default=1):
                minimum time period between invocations of the callback. When
                set to 0, the callback will be invoked for every individual
                change. With higher values, changes will be batched for
                efficiency's sake.
            heartbeat (int):
                time period between keepalive events (in seconds).
            timeout (int):
                maximum period of inactivity (in seconds) before which the server
                will send a response.
        Returns:
            When called without requesting a feed: a dictionary of the form `{last_seq:0, results:[{id:'', seq:0, …}, …]}`
            If called with `feed='continuous'` and a valid callback: a ChangesFeed object
        """
        if opts.get('feed') == 'continuous':
            # the continuous path hands ownership of the connection to a feed object
            if not hasattr(callback, '__call__'):
                raise RuntimeError('Continuous changes feed requires a callback argument')
            return ChangesFeed(self, callback=callback, **opts)
        # one-shot: a plain GET on _changes
        return self.resource.get_json('_changes', callback=callback, **opts)
    def query(self, map_src, reduce_src=None, callback=None, **options):
        """Create a temporary view using the provided javascript function(s)
        and perform a mind-bendingly inefficient ad-hoc query.
        Args:
            map_src (str): a map function string such as:
                'function(doc){emit(null,null)}'
            reduce_src (str): optionally include a reduce function string:
                'function(key, values, rereduce){return sum(values)}'
                or the name of a builtin (`_sum`, `_count`, or `_stats`)
        Kwargs:
            all standard view options (see Database.view)
        Returns:
            View. An iterable list of Row objects.
        """
        body = dict(map=map_src, language='javascript')
        if reduce_src:
            body['reduce'] = reduce_src
        # `keys` must travel in the POST body, not the query string
        viewkeys = options.pop('keys', None)
        opts = _encode_view_options(options)
        if viewkeys:
            body['keys'] = viewkeys
        content = json.encode(body)
        headers = {'Content-Type': 'application/json'}
        def postproc(data, status):
            # wrap the raw result rows in a View for iteration
            if status.ok:
                data = View('_temp_view', options, data)
            return data, status
        return self.resource('_temp_view').post_json(body=content, headers=headers, process=postproc, callback=callback, **opts)
def view(self, name, callback=None, **options):
"""Query a view.
All of the query args in the HTTP api can be passed as keyword
args. Key values should be json-serializeable objects (abbreviated
as `obj` below) of the form defined in your view function.
Args:
name (str): a view name of the form 'myddoc/viewname'
Kwargs:
key (obj): retrieve only rows matching this key
keys (list): a list of key values to retrieve
descending (bool): whether to invert the ordering of the rows.
This ordering is applied before any key filtering takes place,
thus you may need to reverse your `start`s and `end`s when
toggling this.
startkey (obj): key of the first row to include in the results
endkey (obj): key of the final row of results
inclusive_end (bool): by default, include rows matching `endkey`
in the results. If False, treat `endkey` as defining the
first rows to be *excluded* from the results.
startkey_docid (str): within the rows bounded by startkey and
endkey, perform a further filtering by discarding rows before
this ID.
endkey_docid (str): discard rows between this doc ID and endkey
include_docs (bool): if True, each Row in the results will
have the corresponding document in its .doc attr.
limit (int): the maximum number of rows to retrieve
stale (str): specify how to handle view indexing,
* 'ok': build results from the current index even if it's
out of date
* 'update_after': return stale results but trigger a
view re-index for the benefit of subsequent queries.
skip (int): of the rows that would be returned on the basis of
any prior key filtering, discard this many from the beginning.
update_seq (bool): include an update_seq field in the response
indicating the seq of the most recent indexing (from which the
results were pulled).
reduce (bool): if False, ignore the reduce function on this
view and return the output of its map step.
group (bool): if True, generate a row for each distinct key in
the reduce results. By defualt, the reduce function's output
will boil down to a single row.
group_level (int or 'exact'): when using ‘complex keys’ (i.e.,
lists) group_level defines how many elements from each key
should be used when deciding if rows have ‘distinct’ keys
for the purposes of the reduction step.
Returns:
View.
"""
path = _path_from_name(name, '_view')
propterhoc = options.get('process',NOOP)
if propterhoc is not NOOP:
del options['process']
def posthoc(data, status):
if status.ok:
data = View(name, options, data)
return propterhoc(data, status)
viewkeys = options.pop('keys', None)
opts = _encode_view_options(options)
if viewkeys:
return self.resource(*path).post_json(body=dict(keys=viewkeys), process=posthoc, callback=callback, **opts)
else:
return self.resource(*path).get_json(process=posthoc, callback=callback, **opts)
def _doc_resource(base, doc_id):
"""Return the resource for the given document id.
"""
# Split an id that starts with a reserved segment, e.g. _design/foo, so
# that the / that follows the 1st segment does not get escaped.
try:
if doc_id[:1] == '_':
return base(*doc_id.split('/', 1))
return base(doc_id)
except Exception, e:
tron()
def _path_from_name(name, type):
"""Expand a 'design/foo' style name to its full path as a list of
segments.
"""
if name.startswith('_'):
return name.split('/')
design, name = name.split('/', 1)
return ['_design', design, type, name]
def _encode_view_options(options):
    """Encode any items in the options dict that are sent as a JSON string to a
    view/list function.

    key/startkey/endkey are always JSON-encoded; other options only when
    they are not already plain strings.
    """
    encoded = {}
    for opt_name, opt_value in options.items():
        if opt_name in ('key', 'startkey', 'endkey') or not isinstance(opt_value, basestring):
            opt_value = json.encode(opt_value)
        encoded[opt_name] = opt_value
    return encoded
# Databases with reserved names that bypass the normal validation rules.
SPECIAL_DB_NAMES = set(['_users'])
# Legal CouchDB database names: a lowercase letter followed by lowercase
# letters, digits, or any of _ $ ( ) + - /.  The '-' must sit at the end of
# the character class to be a literal; the old '+-/' form was parsed as a
# range and accidentally admitted ',' and '.' as well.
VALID_DB_NAME = re.compile(r'^[a-z][a-z0-9_$()+/-]*$')
def validate_dbname(name, encoded=False):
    """Check *name* against CouchDB's database-naming rules.

    Reserved names (e.g. '_users') are always accepted.  When *encoded*
    is true, the name is URL-unquoted before validation.

    Raises:
        ValueError: if the name is not a legal database name.
    Returns:
        the (decoded) name.
    """
    if encoded:
        from urllib import unquote
        name = unquote(name)
    if name in SPECIAL_DB_NAMES:
        return name
    if VALID_DB_NAME.match(name):
        return name
    raise ValueError('Invalid database name')
| 0.007111 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for GradientDescent optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import gradient_descent
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class GradientDescentOptimizerTest(test.TestCase):
  """Functional tests for contrib's v2 GradientDescentOptimizer.

  Each test builds a tiny graph, runs exactly one SGD step inside a
  graph-mode session, and compares the updated parameters against
  hand-computed values (update rule: var -= lr * grad).
  """

  def testBasic(self):
    """One SGD step on plain ref variables, for each supported dtype."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        optimizer = gradient_descent.GradientDescentOptimizer(3.0)
        sgd_op = optimizer.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           var0.eval())
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           var1.eval())
        # Plain SGD keeps no slot variables.
        self.assertEqual(0, len(optimizer.variables()))
  def testBasicResourceVariable(self):
    """Same as testBasic but on resource variables."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           var0.eval())
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           var1.eval())
  def testMinimizeResourceVariable(self):
    """minimize() on a dense linear model with resource variables."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(var0, x) + var1
        loss = pred * pred
        sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        # d(loss)/d(pred) = 2*pred; chain rule gives the per-weight grads below.
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
  def testMinimizeSparseResourceVariable(self):
    """minimize() where the variable is read via embedding_lookup (sparse grad)."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        pred += var1
        loss = pred * pred
        sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
  def testTensorLearningRate(self):
    """The learning rate may be a Tensor rather than a Python float."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        lrate = constant_op.constant(3.0)
        sgd_op = gradient_descent.GradientDescentOptimizer(
            lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           var0.eval())
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           var1.eval())
  def testGradWrtRef(self):
    """compute_gradients works when differentiating w.r.t. ref variables."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        opt = gradient_descent.GradientDescentOptimizer(3.0)
        values = [1.0, 3.0]
        vars_ = [variables.Variable([v], dtype=dtype) for v in values]
        grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
        variables.global_variables_initializer().run()
        # d(v0+v1)/dvi == 1 for both variables.
        for grad, _ in grads_and_vars:
          self.assertAllCloseAccordingToType([1.0], grad.eval())
  def testWithGlobalStep(self):
    """apply_gradients increments the supplied global_step."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        global_step = variables.Variable(0, trainable=False)
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]), global_step=global_step)
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params and global_step
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           var0.eval())
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           var1.eval())
        self.assertAllCloseAccordingToType(1, global_step.eval())
  def testSparseBasic(self):
    """IndexedSlices gradients only update the rows they reference."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
        var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
        grads0 = ops.IndexedSlices(
            constant_op.constant(
                [0.1], shape=[1, 1], dtype=dtype),
            constant_op.constant([0]),
            constant_op.constant([2, 1]))
        grads1 = ops.IndexedSlices(
            constant_op.constant(
                [0.01], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0], [2.0]], var0.eval())
        self.assertAllCloseAccordingToType([[3.0], [4.0]], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params: only row 0 of var0 / row 1 of var1 change.
        self.assertAllCloseAccordingToType([[1.0 - 3.0 * 0.1], [2.0]],
                                           var0.eval())
        self.assertAllCloseAccordingToType([[3.0], [4.0 - 3.0 * 0.01]],
                                           var1.eval())
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| 0.002692 |
# Global settings for mothra project.
import django.conf.global_settings as DEFAULT_SETTINGS
import os
# Filesystem anchors derived from this settings module's own location.
PROJECT_DIR = os.path.dirname(__file__)
PUBLIC_DIR = os.path.join(PROJECT_DIR, 'public')
BACKUP_DIR = os.path.join(PROJECT_DIR, 'backup')
DEBUG = False
# NOTE(review): TEMPLATE_DEBUG is True while DEBUG is False; Django only
# surfaces template debug information when DEBUG is also True -- confirm
# this mismatch is intended.
TEMPLATE_DEBUG = True
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PUBLIC_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PUBLIC_DIR, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.cache.UpdateCacheMiddleware',  # must be first
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',  # must be last
)
# Site-wide cache middleware configuration (10 second page cache).
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_SECONDS = 10
CACHE_MIDDLEWARE_KEY_PREFIX = 'mothra'
ROOT_URLCONF = 'mothra.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mothra.wsgi.application'
# NOTE(review): TEMPLATE_DIRS is reassigned near the bottom of this module;
# this value is discarded (both point at the same directory, though).
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_DIR, 'templates'),
)
# NOTE(review): this custom list is clobbered later in this module by the
# assignment from DEFAULT_SETTINGS -- e.g. the `request` context processor
# added here is lost. Confirm which of the two assignments is intended.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
)
FIXTURE_DIRS = (
    os.path.join(PROJECT_DIR, 'fixtures'),
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
USE_CONCURRENCY = False
INSTALLED_APPS_DEFAULT = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'south',
    'website',
    'signuplogin',
    'django_extensions',
    'django.contrib.humanize',
    'orderable_inlines',
    'workflows',
    'picklefield',
    'streams',
    #'rest_framework',
    'djcelery',
    #'kombu.transport.django',
)
INSTALLED_APPS_WORKFLOWS_SUB = ()
import djcelery
djcelery.setup_loader()
# Pull in machine-specific overrides unless LOCAL_SETTINGS was defined by
# a wrapper module that imported this file.
try:
    LOCAL_SETTINGS
except NameError:
    try:
        from local_settings import *
    except ImportError:
        pass
INSTALLED_APPS = \
    INSTALLED_APPS_DEFAULT +\
    INSTALLED_APPS_WORKFLOWS_SUB
#REST_FRAMEWORK = {
#    'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
#    'PAGINATE_BY': 10
#}
# NOTE(review): overwrites the custom TEMPLATE_CONTEXT_PROCESSORS defined
# earlier in this module with Django's defaults.
TEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS
TEMPLATES_FOLDER = os.path.join(PROJECT_DIR, 'templates')
PROJECT_FOLDER = PROJECT_DIR
TEMPLATE_DIRS = (TEMPLATES_FOLDER,)
AUTH_PROFILE_MODULE = 'workflows.UserProfile'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
# NOTE(review): depends on the current working directory at import time,
# unlike every other path here which is anchored to PROJECT_DIR -- confirm.
STATIC_DOC_ROOT = os.path.join(os.getcwd(), 'mothra/public/media')
CELERY_RESULT_BACKEND = 'amqp'
CELERY_TASK_RESULT_EXPIRES = 18000
| 0.001693 |
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
import IECore
import IECoreRI
class CoordinateSystemTest( IECoreRI.TestCase ) :
	"""Checks that IECore.CoordinateSystem renders to RenderMan as a
	ScopedCoordinateSystem call, emitting a transform block only when the
	coordinate system carries its own transform."""
	# RIB file the renderer writes into; each test inspects its contents.
	outputFileName = os.path.dirname( __file__ ) + "/output/coordSys.rib"
	def test( self ) :
		"""A coordinate system without a transform emits no transform block."""
		r = IECoreRI.Renderer( self.outputFileName )
		with IECore.WorldBlock( r ) :
			c = IECore.CoordinateSystem( "helloWorld" )
			c.render( r )
		l = "".join( file( self.outputFileName ).readlines() )
		self.failUnless( "ScopedCoordinateSystem \"helloWorld\"" in l )
		self.failIf( "TransformBegin" in l )
		self.failIf( "TransformEnd" in l )
	def testTransform( self ) :
		"""A coordinate system with a transform is wrapped in Transform{Begin,End}."""
		r = IECoreRI.Renderer( self.outputFileName )
		with IECore.WorldBlock( r ) :
			c = IECore.CoordinateSystem(
				"helloWorld",
				IECore.MatrixTransform( IECore.M44f.createTranslated( IECore.V3f( 1 ) ) ),
			)
			c.render( r )
		l = "".join( file( self.outputFileName ).readlines() )
		self.failUnless( "ScopedCoordinateSystem \"helloWorld\"" in l )
		self.failUnless( "TransformBegin" in l )
		self.failUnless( "TransformEnd" in l )
		self.failUnless( "ConcatTransform" in l )
	def testScoping( self ) :
		"""Each procedural must see the coordinate system of its own group scope."""
		class TestProcedural( IECore.ParameterisedProcedural ) :
			def __init__( self ) :
				IECore.ParameterisedProcedural.__init__( self, "" )
			def doBound( self, args ) :
				return IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 10 ) )
			def doRender( self, renderer, args ) :
				# Record the coordinate system visible at render time.
				self.coordinateSystem = renderer.getTransform( "testCoordSys" )
		renderer = IECoreRI.Renderer( "" )
		procedurals = []
		with IECore.WorldBlock( renderer ) :
			for i in range( 0, 10 ) :
				renderer.setAttribute( "user:proceduralIndex", IECore.IntData( i ) )
				g = IECore.Group()
				g.addState( IECore.CoordinateSystem( "testCoordSys", IECore.MatrixTransform( IECore.M44f.createTranslated( IECore.V3f( i ) ) ) ) )
				p = TestProcedural()
				g.addChild( p )
				procedurals.append( p )
				g.render( renderer )
		# Each procedural should have captured its own group's transform.
		for i in range( 0, 10 ) :
			self.assertEqual( procedurals[i].coordinateSystem.translation(), IECore.V3f( i ) )
# Run the suite when executed directly.
if __name__ == "__main__":
	unittest.main()
| 0.051998 |
"""
Django settings for djtest project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Repository root: two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DJANGO_DEBUG', '') == 'True'
# NOTE(review): '*' disables host header validation -- acceptable only
# behind a proxy that sets/validates Host itself.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'ticketsub',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'djtest.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'djtest.wsgi.application'
# Database
# NOTE(review): DJANGO_DB_NAME defaults to '' which makes NAME == BASE_DIR
# (a directory) -- confirm the variable is always set in deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, os.environ.get('DJANGO_DB_NAME','')),
    }
}
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static file settings
STATIC_URL = '/static/'
| 0.000472 |
"""
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from problem.models import Problem
from problem.models import Submission
from problem.models import SubmissionDetail
from contest.models import Contest
from contest.models import Contestant
from utils.user_info import send_notification
from utils.log_info import get_logger
# Module-level logger shared by every rejudge helper below.
logger = get_logger()
'''
rejudge:
1. problem's all submission
2. a single submission
3. submissions during contest
'''
def rejudge(obj):
    """Dispatch *obj* to the matching rejudge helper based on its model type."""
    dispatch = (
        (Problem, rejudge_problem),
        (Submission, rejudge_submission),
        (Contest, rejudge_contest),
    )
    for model, handler in dispatch:
        if isinstance(obj, model):
            handler(obj)
            return
def rejudge_problem(problem):
    """Rejudge every submission belonging to *problem*."""
    for sub in Submission.objects.filter(problem=problem):
        rejudge_submission(sub)
def rejudge_submission(submission):
    """Reset one submission to WAIT, notify its owner, and purge its details."""
    # Undo the accepted counter before the status is reset.
    if submission.status == Submission.ACCEPTED:
        submission.problem.ac_count -= 1
        submission.problem.save()
    submission.status = Submission.WAIT
    submission.save()
    send_notification(
        submission.user,
        "Your submission %s is to be rejudged!" % submission.id)
    logger.info('Submission %s rejudged!' % submission.id)
    for detail in SubmissionDetail.objects.filter(sid=submission):
        logger.info('SubmissionDetail %s deleted!' % detail)
        detail.delete()
def rejudge_contest(contest):
    """Rejudge the in-contest submissions of every problem in *contest*."""
    for contest_problem in contest.problem.all():
        rejudge_contest_problem(contest, contest_problem)
def rejudge_contest_problem(contest, problem):
    """Rejudge submissions to *problem* made by contestants during *contest*."""
    contestant_ids = Contestant.objects.filter(contest=contest).\
        values_list('user', flat=True)
    in_contest = Submission.objects.filter(
        problem=problem,
        submit_time__gte=contest.start_time,
        submit_time__lte=contest.end_time,
        user__in=contestant_ids)
    for sub in in_contest:
        rejudge_submission(sub)
| 0 |
import os
class LocalRepository:
    """Serve parsed markdown entries from a directory on local disk.

    Results are memoized through an optional cache object exposing
    ``get(key)`` and ``set(key, value, timeout)`` (e.g. a Django/Flask
    cache backend). When no cache is supplied, every call hits the disk.
    """

    # Seconds a parsed result stays in the cache.
    CACHE_GETFILES_TIMEOUT = 3600

    def __init__(self, base_dir, parser, cache=None, pagesize=5):
        self.base_dir = os.path.abspath(base_dir)
        self.parser = parser
        self.cache = cache
        self.pagesize = pagesize
        self.testdir(self.base_dir)

    def _cache_get(self, key):
        # Bug fix: tolerate the documented cache=None default, which
        # previously crashed with AttributeError on self.cache.get().
        return self.cache.get(key) if self.cache else None

    def _cache_set(self, key, value):
        if self.cache:
            self.cache.set(key, value, self.CACHE_GETFILES_TIMEOUT)

    def getfile(self, directory, slug):
        """Return ``{'meta': ..., 'content': ...}`` for *slug*, or None if absent."""
        cache_key = "file-detail-{}".format(slug)
        entry = self._cache_get(cache_key)
        if not entry:
            filename = slug + ".md"
            filepath = os.path.abspath(os.path.join(self.base_dir, directory, filename))
            if os.path.exists(filepath):
                meta, content = self.parser.extractfilemeta(filepath)
                entry = {'meta': meta, 'content': content}
                # Bug fix: the computed entry was never stored, so the
                # cache key above could never hit.
                self._cache_set(cache_key, entry)
        return entry

    def getfiles(self, directory, page):
        """Return one page of entries as ``{'total': int, 'entries': list}``."""
        cache_key = "all-files-{}".format(directory)
        files = self._cache_get(cache_key)
        if not files:
            directory = os.path.abspath(os.path.join(self.base_dir, directory))
            self.testdir(directory)
            files = []
            for file in os.listdir(directory):
                meta, content = self.parser.extractfilemeta(os.path.join(directory, file))
                files.append({'meta': meta, 'content': content})
            # Sort the file list by publish date, newest first.
            files.sort(key=lambda x: x['meta']['create_date'], reverse=True)
            self._cache_set(cache_key, files)
        sliced_files = self.paginate(files, page)
        return {'total': len(files), 'entries': sliced_files}

    def paginate(self, files, page):
        """Slice *files* down to the 1-based *page* of size ``self.pagesize``."""
        offset = (page - 1) * self.pagesize
        return files[offset:offset + self.pagesize]

    @staticmethod
    def testdir(directory):
        """Raise RuntimeError unless *directory* exists on disk."""
        if not os.path.exists(directory):
            raise RuntimeError("The repository path doesn't exist.")
| 0.002469 |
#!/usr/bin/env python
'''
Python + Ansible - Class 3 - Exercise 2
Gleydson Mazioli da Silva <gleydsonmazioli@gmail.com>
I created this program with a different concept: Data saving and load using
a yaml or json file. So the system save system resources and can be run throught
a cron or anacron job.
'''
import snmp_helper
import yaml
import json
import sys
import pygal
# pylint: disable=C0103
# pylint: disable=line-too-long
# NOTE(review): SNMPv3 credentials are hard-coded below; consider moving
# them to environment variables or a config file kept out of version control.
my_ip = '50.76.53.27'
my_user = 'pysnmp'
my_pass = 'galileo1'
my_enckey = 'galileo1'
# (address, port) tuple consumed by the snmp_helper calls.
my_host = (my_ip, 7961)
verbose = True
# File format should be json or yaml
file_fmt = 'json'
def save_data(l_var):
'''
Save data
'''
try:
with open('exercise3-2.'+file_fmt, 'w') as fhandler:
if file_fmt == 'yaml':
fhandler.write(yaml.dump(l_var, default_flow_style=False))
elif file_fmt == 'json':
json.dump(l_var, fhandler)
else:
print 'Unknown format: %s' % (file_fmt)
sys.exit(1)
except IOError:
print 'An error happened: '
def load_saved_data(l_default):
'''
Load previous saved data
'''
try:
with open('exercise3-2.'+file_fmt, 'r') as fhandler:
if file_fmt == 'yaml':
file_data = yaml.load(fhandler)
elif file_fmt == 'json':
file_data = json.load(fhandler)
else:
sys.exit('File Read: Invalid file format: '+file_fmt)
except IOError:
if verbose:
print 'File not found: exercise3-2.'+file_fmt
return l_default
return file_data
def get_snmp_data(router, snmp_user, miboid):
    '''
    Fetch a single OID over SNMPv3 and return its extracted value.
    '''
    raw_result = snmp_helper.snmp_get_oid_v3(router, snmp_user, oid=miboid)
    return snmp_helper.snmp_extract(raw_result)
def generate_graphic(l_data):
    '''
    Generate a SVG graphic using data passed as an argument
    '''
    # Per-counter lists of deltas between consecutive samples.
    graph_stats = {
        "in_octets": [],
        "out_octets": [],
        "in_ucast_pkts": [],
        "out_ucast_pkts": []
    }
    for l_label in ("in_octets", "out_octets", "in_ucast_pkts", "out_ucast_pkts"):
        # 0 doubles as the "no previous sample yet" sentinel, so the first
        # plotted delta of each series is always 0.
        l_old_value = 0
        for i in range(0, len(l_data)):
            l_value = l_data[i][l_label]
            if l_old_value == 0:
                l_diff = 0
            else:
                # NOTE(review): a counter reset/wrap makes this negative --
                # confirm that is acceptable for the chart.
                l_diff = int(l_value)-int(l_old_value)
            if verbose:
                print 'xxxxx: %s, diff: %s, (old: %s)' % (l_value, l_diff, l_old_value)
            graph_stats[l_label].append(l_diff)
            l_old_value = l_value
    if verbose:
        print graph_stats
    line_chart = pygal.Line()
    line_chart.title = 'Input/Output bytes and Unicast'
    line_chart.add('InBytes', graph_stats['in_octets'])
    line_chart.add('OutBytes', graph_stats['out_octets'])
    line_chart.add('InUnicast', graph_stats['in_ucast_pkts'])
    line_chart.add('OutUnicast', graph_stats['out_ucast_pkts'])
    line_chart.render_to_file('exercise2.svg')
def main():
    '''
    Poll the interface counters of one interface (table index 5) from the
    SNMP host, append the sample to the on-disk history and regenerate
    the SVG graph.
    '''
    # SNMPv3 credential tuple: (user, auth password, encryption key).
    snmp_user = (my_user, my_pass, my_enckey)
    # IF-MIB columns for interface index 5: ifDescr, ifInOctets,
    # ifOutOctets, ifInUcastPkts, ifOutUcastPkts.
    if_ifdescr = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.2.5')
    if_in_octets = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.10.5')
    if_out_octets = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.16.5')
    if_in_ucast_pkts = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.11.5')
    if_out_ucast_pkts = get_snmp_data(my_host, snmp_user, '1.3.6.1.2.1.2.2.1.17.5')
    print 'Using file format: %s' % (file_fmt)
    # 0 is the sentinel returned when no history file exists yet.
    old_data_list = load_saved_data(0)
    # pylint: disable=maybe-no-member
    if old_data_list == 0:
        # First run: seed the "previous" values with the current sample
        # and start a fresh (empty) history list.
        old_if_ifdescr = if_ifdescr
        old_if_in_octets = if_in_octets
        old_if_out_octets = if_out_octets
        old_if_in_ucast_pkts = if_in_ucast_pkts
        old_if_out_ucast_pkts = if_out_ucast_pkts
        data_list = range(0)  # empty list (Python 2 range)
    else:
        # Previous values come from the last saved sample.
        old_if_ifdescr = old_data_list[-1]['ifdescr']
        old_if_in_octets = old_data_list[-1]['in_octets']
        old_if_out_octets = old_data_list[-1]['out_octets']
        old_if_in_ucast_pkts = old_data_list[-1]['in_ucast_pkts']
        old_if_out_ucast_pkts = old_data_list[-1]['out_ucast_pkts']
        data_list = old_data_list
    if verbose:
        print 'IfDescr: %s (last: %s)' % (if_ifdescr, old_if_ifdescr)
        print 'InOctets %s (last: %s)' % (if_in_octets, old_if_in_octets)
        print 'OutOctets %s (last: %s)' % (if_out_octets, old_if_out_octets)
        print 'In Ucast %s (last: %s)' % (if_in_ucast_pkts, old_if_in_ucast_pkts)
        print 'Out Ucast %s (last: %s)' % (if_out_ucast_pkts, old_if_out_ucast_pkts)
    # Array preparation to save data
    data_list.append({})
    data_list[-1]['ifdescr'] = if_ifdescr
    data_list[-1]['in_octets'] = if_in_octets
    data_list[-1]['out_octets'] = if_out_octets
    data_list[-1]['in_ucast_pkts'] = if_in_ucast_pkts
    data_list[-1]['out_ucast_pkts'] = if_out_ucast_pkts
    save_data(data_list)
    generate_graphic(data_list)
    if verbose:
        print '----------------------------'
if __name__ == "__main__":
    main()
    # Use sys.exit() rather than quit(): quit() is injected by the
    # ``site`` module for interactive sessions and may not exist when
    # the interpreter runs with -S.
    sys.exit()
| 0.002331 |
""" Maximally localized Wannier Functions
Find the set of maximally localized Wannier functions
using the spread functional of Marzari and Vanderbilt
(PRB 56, 1997 page 12847).
"""
from time import time
from math import sqrt, pi
from pickle import dump, load
import numpy as np
from ase.parallel import paropen
from ase.dft.kpoints import get_monkhorst_pack_size_and_offset
from ase.transport.tools import dagger, normalize
dag = dagger
def gram_schmidt(U):
    """Orthonormalize the columns of U in place (classical Gram-Schmidt)."""
    columns = U.T
    for i in range(len(columns)):
        col = columns[i]
        # Remove the projection onto every previously processed column.
        for prev in columns[:i]:
            col -= prev * np.dot(prev.conj(), col)
        col /= np.linalg.norm(col)
def gram_schmidt_single(U, n):
    """Orthogonalize all columns of U to column n, in place.

    Column n itself is left untouched and is assumed to be normalized
    by the caller.
    """
    v_n = U.T[n]
    # Bug fix: the original did ``indices = range(N); del indices[...]``,
    # which only works on Python 2 where range() returns a list.
    for i in range(len(U.T)):
        if i == n:
            continue
        v_i = U.T[i]
        v_i -= v_n * np.dot(v_n.conj(), v_i)
def lowdin(U, S=None):
    """Orthonormalize the columns of U by the symmetric Lowdin procedure.

    If the overlap matrix is already known it can be passed as S.
    """
    if S is None:
        S = np.dot(dag(U), U)
    # U <- U S^(-1/2), built from the eigendecomposition of S.
    eigenvalues, eigenvectors = np.linalg.eigh(S)
    S_inv_sqrt = np.dot(eigenvectors / np.sqrt(eigenvalues), dag(eigenvectors))
    U[:] = np.dot(U, S_inv_sqrt)
def neighbor_k_search(k_c, G_c, kpt_kc, tol=1e-4):
    """Find the neighbor of k_c in direction G_c on the k-point grid.

    Searches for an index k1 (into kpt_kc) and a wrapping lattice vector
    k0 such that k1 - k - G + k0 = 0.  Raises NotImplementedError when no
    match exists, which usually indicates a non-uniform k-point grid.
    """
    alldir_dc = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
                          [1, 1, 0], [1, 0, 1], [0, 1, 1]], int)
    for k0_c in alldir_dc:
        for k1, k1_c in enumerate(kpt_kc):
            if np.linalg.norm(k1_c - k_c - G_c + k0_c) < tol:
                return k1, k0_c
    # print() so the diagnostics also work on Python 3.
    print('Wannier: Did not find matching kpoint for kpt=%s' % (k_c,))
    print('Probably non-uniform k-point grid')
    raise NotImplementedError
def calculate_weights(cell_cc):
    """Weights are used for non-cubic cells, see PRB **61**, 10040."""
    alldirs_dc = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1],
                           [1, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=int)
    g = np.dot(cell_cc, cell_cc.T)
    # NOTE: Only first 3 of following 6 weights are presently used:
    w = np.array([g[0, 0] - g[0, 1] - g[0, 2],
                  g[1, 1] - g[0, 1] - g[1, 2],
                  g[2, 2] - g[0, 2] - g[1, 2],
                  g[0, 1],
                  g[0, 2],
                  g[1, 2]])
    # The first three G-directions are always kept -- they are needed to
    # calculate the Wannier centers.  Off-diagonal directions are added
    # only when their weight is non-negligible.
    Gdir_dc = alldirs_dc[:3]
    weight_d = w[:3]
    for d in range(3, 6):
        if abs(w[d]) > 1e-5:
            Gdir_dc = np.concatenate((Gdir_dc, alldirs_dc[d:d + 1]))
            weight_d = np.concatenate((weight_d, w[d:d + 1]))
    weight_d /= max(abs(weight_d))
    return weight_d, Gdir_dc
def random_orthogonal_matrix(dim, seed=None, real=False):
    """Generate a random orthogonal (real) or unitary (complex) matrix."""
    if seed is not None:
        np.random.seed(seed)
    # Start from a random Hermitian matrix H = (A + A^dagger) / 2.
    H = np.random.rand(dim, dim)
    H = 0.5 * (H + dag(H))
    if real:
        # Orthonormalizing the columns yields a real orthogonal matrix.
        gram_schmidt(H)
        return H
    # For Hermitian H, exp(iH) is unitary.
    val, vec = np.linalg.eig(H)
    return np.dot(vec * np.exp(1.j * val), dag(vec))
def steepest_descent(func, step=.005, tolerance=1e-6, **kwargs):
    """Fixed-step steepest-descent driver.

    Repeatedly steps ``func`` by ``step`` times its gradient until the
    relative change of the functional value drops below ``tolerance``.
    ``func`` must provide get_gradients(), step() and
    get_functional_value().
    """
    fvalueold = 0.
    fvalue = fvalueold + 10
    count = 0
    while abs((fvalue - fvalueold) / fvalue) > tolerance:
        fvalueold = fvalue
        dF = func.get_gradients()
        func.step(dF * step, **kwargs)
        fvalue = func.get_functional_value()
        count += 1
        # print() form is valid on both Python 2 and 3.
        print('SteepestDescent: iter=%s, value=%s' % (count, fvalue))
def md_min(func, step=.25, tolerance=1e-6, verbose=False, **kwargs):
    """Molecular-dynamics style maximization with an adaptive step size.

    A velocity-like accumulator V keeps only the components that still
    point along the current gradient (damping oscillations); the step
    size is halved whenever the functional value decreases.
    """
    if verbose:
        print('Localize with step = %s and tolerance = %s' % (step, tolerance))
    t = -time()
    fvalueold = 0.
    fvalue = fvalueold + 10
    count = 0
    V = np.zeros(func.get_gradients().shape, dtype=complex)
    while abs((fvalue - fvalueold) / fvalue) > tolerance:
        fvalueold = fvalue
        dF = func.get_gradients()
        # Drop the "velocity" components that reversed direction.
        V *= (dF * V.conj()).real > 0
        V += step * dF
        func.step(V, **kwargs)
        fvalue = func.get_functional_value()
        if fvalue < fvalueold:
            step *= 0.5
        count += 1
        if verbose:
            print('MDmin: iter=%s, step=%s, value=%s' % (count, step, fvalue))
    if verbose:
        t += time()
        print('%d iterations in %0.2f seconds (%0.2f ms/iter), endstep = %s' %
              (count, t, t * 1000. / count, step))
def rotation_from_projection2(proj_nw, fixed):
    """Determine rotation and coefficient matrices from projections.

    Greedy variant of rotation_from_projection: the extra degrees of
    freedom (EDF) are picked one at a time as the remaining column of
    largest norm, instead of via an eigendecomposition.
    """
    V_ni = proj_nw
    Nb, Nw = proj_nw.shape
    M = fixed
    L = Nw - M
    # print() form is valid on both Python 2 and 3.
    print('M=%i, L=%i, Nb=%i, Nw=%i' % (M, L, Nb, Nw))
    U_ww = np.zeros((Nw, Nw), dtype=proj_nw.dtype)
    c_ul = np.zeros((Nb-M, L), dtype=proj_nw.dtype)
    for V_n in V_ni.T:
        V_n /= np.linalg.norm(V_n)
    # Find EDF
    P_ui = V_ni[M:].copy()
    la = np.linalg
    for l in range(L):
        norm_list = np.array([la.norm(v) for v in P_ui.T])
        perm_list = np.argsort(-norm_list)
        P_ui = P_ui[:, perm_list].copy()  # largest norm to the left
        P_ui[:, 0] /= la.norm(P_ui[:, 0])  # normalize
        c_ul[:, l] = P_ui[:, 0]  # save normalized EDF
        gram_schmidt_single(P_ui, 0)  # ortho remain. to this EDF
        P_ui = P_ui[:, 1:].copy()  # remove this EDF
    U_ww[:M] = V_ni[:M, :]
    U_ww[M:] = np.dot(c_ul.T.conj(), V_ni[M:])
    gram_schmidt(U_ww)
    return U_ww, c_ul
def rotation_from_projection(proj_nw, fixed, ortho=True):
    """Determine rotation and coefficient matrices from projections

    proj_nw = <psi_n|p_w>
    psi_n: eigenstates
    p_w: localized function

    Nb (n) = Number of bands
    Nw (w) = Number of wannier functions
    M (f) = Number of fixed states
    L (l) = Number of extra degrees of freedom
    U (u) = Number of non-fixed states
    """
    Nb, Nw = proj_nw.shape
    M = fixed
    L = Nw - M
    U_ww = np.empty((Nw, Nw), dtype=proj_nw.dtype)
    # The fixed part of the rotation is simply the projection itself.
    U_ww[:M] = proj_nw[:M]
    if L > 0:
        # Extra degrees of freedom: keep the L strongest linear
        # combinations of the non-fixed states, i.e. the eigenvectors
        # of the projection overlap with the largest eigenvalues.
        proj_uw = proj_nw[M:]
        eig_w, C_ww = np.linalg.eigh(np.dot(dag(proj_uw), proj_uw))
        C_ul = np.dot(proj_uw, C_ww[:, np.argsort(-eig_w.real)[:L]])
        #eig_u, C_uu = np.linalg.eigh(np.dot(proj_uw, dag(proj_uw)))
        #C_ul = C_uu[:, np.argsort(-eig_u.real)[:L]]
        U_ww[M:] = np.dot(dag(C_ul), proj_uw)
    else:
        C_ul = np.empty((Nb - M, 0))
    normalize(C_ul)
    if ortho:
        # Symmetric (Lowdin) orthogonalization keeps U as close as
        # possible to the original projections.
        lowdin(U_ww)
    else:
        normalize(U_ww)
    return U_ww, C_ul
class Wannier:
"""Maximally localized Wannier Functions
Find the set of maximally localized Wannier functions using the
spread functional of Marzari and Vanderbilt (PRB 56, 1997 page
12847).
"""
def __init__(self, nwannier, calc,
             file=None,
             nbands=None,
             fixedenergy=None,
             fixedstates=None,
             spin=0,
             initialwannier='random',
             seed=None,
             verbose=False):
    """
    Required arguments:

      ``nwannier``: The number of Wannier functions you wish to construct.
        This must be at least half the number of electrons in the system
        and at most equal to the number of bands in the calculation.

      ``calc``: A converged DFT calculator class.
        If ``file`` arg. is not provided, the calculator *must* provide the
        method ``get_wannier_localization_matrix``, and contain the
        wavefunctions (save files with only the density is not enough).
        If the localization matrix is read from file, this is not needed,
        unless ``get_function`` or ``write_cube`` is called.

    Optional arguments:

      ``nbands``: Bands to include in localization.
        The number of bands considered by Wannier can be smaller than the
        number of bands in the calculator. This is useful if the highest
        bands of the DFT calculation are not well converged.

      ``spin``: The spin channel to be considered.
        The Wannier code treats each spin channel independently.

      ``fixedenergy`` / ``fixedstates``: Fixed part of Hilbert space.
        Determine the fixed part of Hilbert space by either a maximal
        energy *or* a number of bands (possibly a list for multiple
        k-points).
        Default is None meaning that the number of fixed states is equated
        to ``nwannier``.

      ``file``: Read localization and rotation matrices from this file.

      ``initialwannier``: Initial guess for Wannier rotation matrix.
        Can be 'bloch' to start from the Bloch states, 'random' to be
        randomized, or a list passed to calc.get_initial_wannier.

      ``seed``: Seed for random ``initialwannier``.

      ``verbose``: True / False level of verbosity.
    """
    # Bloch phase sign convention
    sign = -1
    classname = calc.__class__.__name__
    if classname in ['Dacapo', 'Jacapo']:
        print 'Using ' + classname
        sign = +1

    self.nwannier = nwannier
    self.calc = calc
    self.spin = spin
    self.verbose = verbose
    self.kpt_kc = calc.get_bz_k_points()
    # Wannier localization requires the full BZ (no symmetry reduction).
    assert len(calc.get_ibz_k_points()) == len(self.kpt_kc)
    self.kptgrid = get_monkhorst_pack_size_and_offset(self.kpt_kc)[0]
    self.kpt_kc *= sign

    self.Nk = len(self.kpt_kc)
    self.unitcell_cc = calc.get_atoms().get_cell()
    # The "large" cell (small cell repeated by the k-point grid) sets the
    # periodicity of the Wannier orbitals.
    self.largeunitcell_cc = (self.unitcell_cc.T * self.kptgrid).T
    self.weight_d, self.Gdir_dc = calculate_weights(self.largeunitcell_cc)
    self.Ndir = len(self.weight_d) # Number of directions

    if nbands is not None:
        self.nbands = nbands
    else:
        self.nbands = calc.get_number_of_bands()
    if fixedenergy is None:
        if fixedstates is None:
            self.fixedstates_k = np.array([nwannier] * self.Nk, int)
        else:
            if type(fixedstates) is int:
                fixedstates = [fixedstates] * self.Nk
            self.fixedstates_k = np.array(fixedstates, int)
    else:
        # Setting number of fixed states and EDF from specified energy.
        # All states below this energy (relative to Fermi level) are fixed.
        fixedenergy += calc.get_fermi_level()
        print fixedenergy
        self.fixedstates_k = np.array(
            [calc.get_eigenvalues(k, spin).searchsorted(fixedenergy)
             for k in range(self.Nk)], int)
    # Extra degrees of freedom per k-point.
    self.edf_k = self.nwannier - self.fixedstates_k
    if verbose:
        print 'Wannier: Fixed states            : %s' % self.fixedstates_k
        print 'Wannier: Extra degrees of freedom: %s' % self.edf_k

    # Set the list of neighboring k-points k1, and the "wrapping" k0,
    # such that k1 - k - G + k0 = 0
    #
    # Example: kpoints = (-0.375,-0.125,0.125,0.375), dir=0
    # G = [0.25,0,0]
    # k=0.375, k1= -0.375 : -0.375-0.375-0.25 => k0=[1,0,0]
    #
    # For a gamma point calculation k1 = k = 0,  k0 = [1,0,0] for dir=0
    if self.Nk == 1:
        self.kklst_dk = np.zeros((self.Ndir, 1), int)
        k0_dkc = self.Gdir_dc.reshape(-1, 1, 3)
    else:
        self.kklst_dk = np.empty((self.Ndir, self.Nk), int)
        k0_dkc = np.empty((self.Ndir, self.Nk, 3), int)

        # Distance between kpoints
        kdist_c = np.empty(3)
        for c in range(3):
            # make a sorted list of the kpoint values in this direction
            slist = np.argsort(self.kpt_kc[:, c], kind='mergesort')
            skpoints_kc = np.take(self.kpt_kc, slist, axis=0)
            kdist_c[c] = max([skpoints_kc[n + 1, c] - skpoints_kc[n, c]
                              for n in range(self.Nk - 1)])

        for d, Gdir_c in enumerate(self.Gdir_dc):
            for k, k_c in enumerate(self.kpt_kc):
                # setup dist vector to next kpoint
                G_c = np.where(Gdir_c > 0, kdist_c, 0)
                if max(G_c) < 1e-4:
                    self.kklst_dk[d, k] = k
                    k0_dkc[d, k] = Gdir_c
                else:
                    self.kklst_dk[d, k], k0_dkc[d, k] = \
                        neighbor_k_search(k_c, G_c, self.kpt_kc)

    # Set the inverse list of neighboring k-points
    self.invkklst_dk = np.empty((self.Ndir, self.Nk), int)
    for d in range(self.Ndir):
        for k1 in range(self.Nk):
            self.invkklst_dk[d, k1] = self.kklst_dk[d].tolist().index(k1)

    Nw = self.nwannier
    Nb = self.nbands
    self.Z_dkww = np.empty((self.Ndir, self.Nk, Nw, Nw), complex)
    self.V_knw = np.zeros((self.Nk, Nb, Nw), complex)
    if file is None:
        # Compute the bare localization matrices from the calculator.
        self.Z_dknn = np.empty((self.Ndir, self.Nk, Nb, Nb), complex)
        for d, dirG in enumerate(self.Gdir_dc):
            for k in range(self.Nk):
                k1 = self.kklst_dk[d, k]
                k0_c = k0_dkc[d, k]
                self.Z_dknn[d, k] = calc.get_wannier_localization_matrix(
                    nbands=Nb, dirG=dirG, kpoint=k, nextkpoint=k1,
                    G_I=k0_c, spin=self.spin)
    self.initialize(file=file, initialwannier=initialwannier, seed=seed)
def initialize(self, file=None, initialwannier='random', seed=None):
    """Re-initialize current rotation matrix.

    Keywords are identical to those of the constructor.
    """
    Nw = self.nwannier
    Nb = self.nbands

    if file is not None:
        # Restore Z, U and C from a previous save().
        self.Z_dknn, self.U_kww, self.C_kul = load(paropen(file))
    elif initialwannier == 'bloch':
        # Set U and C to pick the lowest Bloch states
        self.U_kww = np.zeros((self.Nk, Nw, Nw), complex)
        self.C_kul = []
        for U, M, L in zip(self.U_kww, self.fixedstates_k, self.edf_k):
            U[:] = np.identity(Nw, complex)
            if L > 0:
                self.C_kul.append(
                    np.identity(Nb - M, complex)[:, :L])
            else:
                self.C_kul.append([])
    elif initialwannier == 'random':
        # Set U and C to random (orthogonal) matrices
        self.U_kww = np.zeros((self.Nk, Nw, Nw), complex)
        self.C_kul = []
        for U, M, L in zip(self.U_kww, self.fixedstates_k, self.edf_k):
            U[:] = random_orthogonal_matrix(Nw, seed, real=False)
            if L > 0:
                self.C_kul.append(random_orthogonal_matrix(
                    Nb - M, seed=seed, real=False)[:, :L])
            else:
                self.C_kul.append(np.array([]))
    else:
        # Use initial guess to determine U and C
        self.C_kul, self.U_kww = self.calc.initial_wannier(
            initialwannier, self.kptgrid, self.fixedstates_k,
            self.edf_k, self.spin, self.nbands)
    self.update()
def save(self, file):
    """Save information on localization and rotation matrices to file."""
    fd = paropen(file, 'w')
    try:
        dump((self.Z_dknn, self.U_kww, self.C_kul), fd)
    finally:
        # Bug fix: close the file explicitly -- the original left the
        # handle open and relied on the garbage collector to flush it.
        fd.close()
def update(self):
    """Recompute V_knw and the Z matrices after U or C has changed."""
    # Update large rotation matrix V (from rotation U and coeff C)
    for k, M in enumerate(self.fixedstates_k):
        self.V_knw[k, :M] = self.U_kww[k, :M]
        if M < self.nwannier:
            self.V_knw[k, M:] = np.dot(self.C_kul[k], self.U_kww[k, M:])
        # else: self.V_knw[k, M:] = 0.0

    # Calculate the Zk matrix from the large rotation matrix:
    # Zk = V^d[k] Zbloch V[k1]
    for d in range(self.Ndir):
        for k in range(self.Nk):
            k1 = self.kklst_dk[d, k]
            self.Z_dkww[d, k] = np.dot(dag(self.V_knw[k]), np.dot(
                self.Z_dknn[d, k], self.V_knw[k1]))

    # Update the new Z matrix
    self.Z_dww = self.Z_dkww.sum(axis=1) / self.Nk
def get_centers(self, scaled=False):
    """Calculate the Wannier centers.

    ::

      pos = L / 2pi * phase(diag(Z))
    """
    phase_wc = np.angle(self.Z_dww[:3].diagonal(0, 1, 2)).T
    # Scaled (fractional) coordinates inside the large cell, wrapped to [0, 1).
    coord_wc = (phase_wc / (2 * pi)) % 1
    if scaled:
        return coord_wc
    return np.dot(coord_wc, self.largeunitcell_cc)
def get_radii(self):
    """Calculate the spread of the Wannier functions.

    ::

                    --  /  L  \ 2       2
      radius**2 = - >   | --- |   ln |Z|
                    --d \ 2pi /
    """
    scale2_c = self.largeunitcell_cc.diagonal()**2 / (2 * pi)**2
    logabs2_cw = np.log(abs(self.Z_dww[:3].diagonal(0, 1, 2))**2)
    return np.sqrt(-np.dot(scale2_c, logabs2_cw))
def get_spectral_weight(self, w):
    """Return the k- and band-resolved weight |V_kn|^2 / Nk of WF ``w``."""
    weight_kn = np.abs(self.V_knw[:, :, w])**2
    return weight_kn / self.Nk
def get_pdos(self, w, energies, width):
    """Projected density of states (PDOS).

    Returns the PDOS for Wannier function ``w``, evaluated on the grid
    ``energies`` as a sum of Gaussians of the given ``width`` centered
    at the eigenvalues and weighted by the spectral weight of the
    Wannier function.
    """
    spec_kn = self.get_spectral_weight(w)
    dos = np.zeros(len(energies))
    for k, spec_n in enumerate(spec_kn):
        # Bug fix: the original referenced the undefined names ``kpt``,
        # ``eig`` and ``center``.  Use the same calculator signature as
        # get_hamiltonian (kpt=..., spin=...).
        eig_n = self.calc.get_eigenvalues(kpt=k, spin=self.spin)
        for weight, center in zip(spec_n, eig_n):
            # Add a Gaussian centered at the eigenvalue; the exponent is
            # clipped to avoid underflow warnings far from the center.
            x = ((energies - center) / width)**2
            dos += weight * np.exp(-x.clip(0., 40.)) / (sqrt(pi) * width)
    return dos
def max_spread(self, directions=[0, 1, 2]):
    """Returns the index of the most delocalized Wannier function
    together with the value of the spread functional."""
    d = np.zeros(self.nwannier)
    for dir in directions:
        # Bug fix: the original assigned the whole |Z_nn|^2 array into
        # the scalar slot d[dir] (a ValueError in numpy); accumulate the
        # weighted contribution per Wannier function instead.
        d += np.abs(self.Z_dww[dir].diagonal())**2 * self.weight_d[dir]
    # Smallest |Z|^2 means largest spread, i.e. most delocalized.
    index = np.argsort(d)[0]
    print('Index: %s' % index)
    print('Spread: %s' % d[index])
    # Bug fix: actually return the promised values (the original only
    # printed them and returned None).
    return index, d[index]
def translate(self, w, R):
    """Translate the w'th Wannier function.

    The distance vector R = [n1, n2, n3] is in units of the basis
    vectors of the small cell.
    """
    R_c = np.array(R)
    for k in range(self.Nk):
        # Multiply by the Bloch phase of the translation at this k-point.
        bloch_phase = np.exp(2.j * pi * np.dot(R_c, self.kpt_kc[k]))
        self.U_kww[k][:, w] *= bloch_phase
    self.update()
def translate_to_cell(self, w, cell):
    """Move Wannier function ``w`` into the specified unit cell.

    The current cell of the function is read off the phase of the
    diagonal Z elements; the required translation is the difference.
    """
    current_c = np.angle(self.Z_dww[:3, w, w]) * self.kptgrid / (2 * pi)
    shift_c = np.array(cell) - np.floor(current_c)
    self.translate(w, shift_c)
def translate_all_to_cell(self, cell=[0, 0, 0]):
    """Translate all Wannier functions to specified cell.

    Move all Wannier orbitals to a specific unit cell.  There
    exists an arbitrariness in the positions of the Wannier
    orbitals relative to the unit cell. This method can move all
    orbitals to the unit cell specified by ``cell``.  For a
    `\Gamma`-point calculation, this has no effect. For a
    **k**-point calculation the periodicity of the orbitals are
    given by the large unit cell defined by repeating the original
    unitcell by the number of **k**-points in each direction.  In
    this case it is useful to move the orbitals away from the
    boundaries of the large cell before plotting them. For a bulk
    calculation with, say 10x10x10 **k** points, one could move
    the orbitals to the cell [2,2,2].  In this way the pbc
    boundary conditions will not be noticed.
    """
    # Current cell of every orbital from the phases of diag(Z),
    # then one combined phase factor per (Wannier function, k-point).
    scaled_wc = np.angle(self.Z_dww[:3].diagonal(0, 1, 2)).T * \
                self.kptgrid / (2 * pi)
    trans_wc = np.array(cell)[None] - np.floor(scaled_wc)
    for kpt_c, U_ww in zip(self.kpt_kc, self.U_kww):
        U_ww *= np.exp(2.j * pi * np.dot(trans_wc, kpt_c))
    self.update()
def distances(self, R):
    """Return the Nw x Nw matrix of distances between Wannier centers,
    with the second set of centers translated by cell vector R."""
    Nw = self.nwannier
    centers = self.get_centers()
    # Second copy of the centers, shifted by the requested lattice vector.
    shifted = centers.copy()
    for axis in range(3):
        shifted += self.unitcell_cc[axis] * R[axis]
    r1 = centers.repeat(Nw, axis=0).reshape(Nw, Nw, 3)
    r2 = np.swapaxes(shifted.repeat(Nw, axis=0).reshape(Nw, Nw, 3), 0, 1)
    return np.sqrt(np.sum((r1 - r2)**2, axis=-1))
def get_hopping(self, R):
    """Returns the matrix H(R)_nm=<0,n|H|R,m>.

    ::

                            1   _   -ik.R
      H(R) = <0,n|H|R,m> = --- >_  e      H(k)
                           Nk   k

    where R is the cell-distance (in units of the basis vectors of
    the small cell) and n,m are indices of the Wannier functions.
    """
    R_c = np.array(R)
    H_ww = np.zeros([self.nwannier, self.nwannier], complex)
    for k in range(self.Nk):
        phase = np.exp(-2.j * pi * np.dot(R_c, self.kpt_kc[k]))
        H_ww += self.get_hamiltonian(k) * phase
    return H_ww / self.Nk
def get_hamiltonian(self, k=0):
    """Get Hamiltonian at existing k-vector of index k.

    ::

                    dag
      H(k) = V    diag(eps )  V
              k           k    k
    """
    V_nw = self.V_knw[k]
    eps_n = self.calc.get_eigenvalues(kpt=k, spin=self.spin)[:self.nbands]
    # Rotate the diagonal Bloch Hamiltonian into the Wannier basis.
    return np.dot(dag(V_nw) * eps_n, V_nw)
def get_hamiltonian_kpoint(self, kpt_c):
    """Get Hamiltonian at some new arbitrary k-vector.

    ::

              _   ik.R
      H(k) =  >_ e     H(R)
               R

    Warning: This method moves all Wannier functions to cell (0, 0, 0)
    """
    if self.verbose:
        print('Translating all Wannier functions to cell (0, 0, 0)')
    self.translate_all_to_cell()
    # Symmetric window of cells around the home cell.  ``//`` keeps the
    # arithmetic integral on Python 3 as well, and the local name no
    # longer shadows the builtin ``max``.
    N1, N2, N3 = (self.kptgrid - 1) // 2
    Hk = np.zeros([self.nwannier, self.nwannier], complex)
    for n1 in range(-N1, N1 + 1):
        for n2 in range(-N2, N2 + 1):
            for n3 in range(-N3, N3 + 1):
                R = np.array([n1, n2, n3], float)
                hop_ww = self.get_hopping(R)
                phase = np.exp(+2.j * pi * np.dot(R, kpt_c))
                Hk += hop_ww * phase
    return Hk
def get_function(self, index, repeat=None):
    """Get Wannier function on grid.

    Returns an array with the function values of the indicated Wannier
    function on a grid with the size of the *repeated* unit cell.

    For a calculation using **k**-points the relevant unit cell for
    eg. visualization of the Wannier orbitals is not the original unit
    cell, but rather a larger unit cell defined by repeating the
    original unit cell by the number of **k**-points in each direction.
    Note that for a `\Gamma`-point calculation the large unit cell
    coincides with the original unit cell.
    The large unitcell also defines the periodicity of the Wannier
    orbitals.

    ``index`` can be either a single WF or a coordinate vector in terms
    of the WFs.
    """
    # Default size of plotting cell is the one corresponding to k-points.
    if repeat is None:
        repeat = self.kptgrid
    N1, N2, N3 = repeat

    dim = self.calc.get_number_of_grid_points()
    largedim = dim * [N1, N2, N3]

    wanniergrid = np.zeros(largedim, dtype=complex)
    for k, kpt_c in enumerate(self.kpt_kc):
        # The coordinate vector of wannier functions
        if type(index) == int:
            vec_n = self.V_knw[k, :, index]
        else:
            vec_n = np.dot(self.V_knw[k], index)

        # Sum the Bloch states at this k-point into one periodic function.
        wan_G = np.zeros(dim, complex)
        for n, coeff in enumerate(vec_n):
            wan_G += coeff * self.calc.get_pseudo_wave_function(
                n, k, self.spin, pad=True)

        # Distribute the small wavefunction over large cell:
        for n1 in xrange(N1):
            for n2 in xrange(N2):
                for n3 in xrange(N3): # sign?
                    e = np.exp(-2.j * pi * np.dot([n1, n2, n3], kpt_c))
                    wanniergrid[n1 * dim[0]:(n1 + 1) * dim[0],
                                n2 * dim[1]:(n2 + 1) * dim[1],
                                n3 * dim[2]:(n3 + 1) * dim[2]] += e * wan_G

    # Normalization
    wanniergrid /= np.sqrt(self.Nk)
    return wanniergrid
def write_cube(self, index, fname, repeat=None, real=True):
    """Dump specified Wannier function to a cube file."""
    from ase.io.cube import write_cube

    # Default size of plotting cell is the one corresponding to k-points.
    if repeat is None:
        repeat = self.kptgrid
    atoms = self.calc.get_atoms() * repeat
    func = self.get_function(index, repeat)

    # Handle separation of complex wave into real parts
    if real:
        if self.Nk == 1:
            # A gamma-point function can be made (almost) real by a
            # global phase rotation.
            func *= np.exp(-1.j * np.angle(func.max()))
            # (Removed a dead "if 0: assert ..." debug line here.)
            func = func.real
        else:
            func = abs(func)
    else:
        # Write the phase to a separate <name>.phase.<ext> file and the
        # magnitude to the main file.
        phase_fname = fname.split('.')
        phase_fname.insert(1, 'phase')
        phase_fname = '.'.join(phase_fname)
        write_cube(phase_fname, atoms, data=np.angle(func))
        func = abs(func)

    write_cube(fname, atoms, data=func)
def localize(self, step=0.25, tolerance=1e-08,
             updaterot=True, updatecoeff=True):
    """Optimize the rotation to give maximal localization.

    Delegates the actual optimization to the module-level md_min driver.
    """
    md_min(self, step=step, tolerance=tolerance, verbose=self.verbose,
           updaterot=updaterot, updatecoeff=updatecoeff)
def get_functional_value(self):
    """Calculate the value of the spread functional.

    ::

      Tr[|ZI|^2]=sum(I)sum(n) w_i|Z_(i)_nn|^2,

    where w_i are weights."""
    diag_dw = self.Z_dww.diagonal(0, 1, 2)
    a_d = (np.abs(diag_dw)**2).sum(axis=1)
    return np.dot(a_d, self.weight_d).real
def get_gradients(self):
    """Return the flattened gradient of the spread functional with
    respect to the rotations U (all k-points) followed by the
    coefficients C."""
    # Determine gradient of the spread functional.
    #
    # The gradient for a rotation A_kij is::
    #
    #    dU = dRho/dA_{k,i,j} = sum(I) sum(k')
    #            + Z_jj Z_kk',ij^* - Z_ii Z_k'k,ij^*
    #            - Z_ii^* Z_kk',ji + Z_jj^* Z_k'k,ji
    #
    # The gradient for a change of coefficients is::
    #
    #   dRho/da^*_{k,i,j} = sum(I) [[(Z_0)_{k} V_{k'} diag(Z^*) +
    #                                (Z_0_{k''})^d V_{k''} diag(Z)] *
    #                                U_k^d]_{N+i,N+j}
    #
    # where diag(Z) is a square,diagonal matrix with Z_nn in the diagonal,
    # k' = k + dk and k = k'' + dk.
    #
    # The extra degrees of freedom chould be kept orthonormal to the fixed
    # space, thus we introduce lagrange multipliers, and minimize instead::
    #
    #     Rho_L=Rho- sum_{k,n,m} lambda_{k,nm} <c_{kn}|c_{km}>
    #
    # for this reason the coefficient gradients should be multiplied
    # by (1 - c c^d).
    Nb = self.nbands
    Nw = self.nwannier

    dU = []
    dC = []
    for k in xrange(self.Nk):
        M = self.fixedstates_k[k]
        L = self.edf_k[k]

        U_ww = self.U_kww[k]
        C_ul = self.C_kul[k]

        Utemp_ww = np.zeros((Nw, Nw), complex)
        Ctemp_nw = np.zeros((Nb, Nw), complex)

        for d, weight in enumerate(self.weight_d):
            if abs(weight) < 1.0e-6:
                # Skip directions with negligible weight.
                continue

            Z_knn = self.Z_dknn[d]
            diagZ_w = self.Z_dww[d].diagonal()
            Zii_ww = np.repeat(diagZ_w, Nw).reshape(Nw, Nw)
            k1 = self.kklst_dk[d, k]
            k2 = self.invkklst_dk[d, k]
            V_knw = self.V_knw
            Z_kww = self.Z_dkww[d]

            if L > 0:
                Ctemp_nw += weight * np.dot(
                    np.dot(Z_knn[k], V_knw[k1]) * diagZ_w.conj() +
                    np.dot(dag(Z_knn[k2]), V_knw[k2]) * diagZ_w,
                    dag(U_ww))

            temp = Zii_ww.T * Z_kww[k].conj() - Zii_ww * Z_kww[k2].conj()
            Utemp_ww += weight * (temp - dag(temp))
        dU.append(Utemp_ww.ravel())
        if L > 0:
            # Ctemp now has same dimension as V, the gradient is in the
            # lower-right (Nb-M) x L block
            Ctemp_ul = Ctemp_nw[M:, M:]
            G_ul = Ctemp_ul - np.dot(np.dot(C_ul, dag(C_ul)), Ctemp_ul)
            dC.append(G_ul.ravel())

    return np.concatenate(dU + dC)
def step(self, dX, updaterot=True, updatecoeff=True):
    """Apply one optimization step: U -> U exp(-A) and C -> C + dC,
    where dX is the concatenation (A for all k-points, then dC)."""
    # dX is (A, dC) where U->Uexp(-A) and C->C+dC
    Nw = self.nwannier
    Nk = self.Nk
    M_k = self.fixedstates_k
    L_k = self.edf_k
    if updaterot:
        A_kww = dX[:Nk * Nw**2].reshape(Nk, Nw, Nw)
        for U, A in zip(self.U_kww, A_kww):
            H = -1.j * A.conj()
            epsilon, Z = np.linalg.eigh(H)
            # Z contains the eigenvectors as COLUMNS.
            # Since H = iA, dU = exp(-A) = exp(iH) = ZDZ^d
            dU = np.dot(Z * np.exp(1.j * epsilon), dag(Z))
            if U.dtype == float:
                U[:] = np.dot(U, dU).real
            else:
                U[:] = np.dot(U, dU)

    if updatecoeff:
        # Walk through the flattened coefficient gradients, one k-point
        # at a time, and re-orthonormalize C after each update.
        start = 0
        for C, unocc, L in zip(self.C_kul, self.nbands - M_k, L_k):
            if L == 0 or unocc == 0:
                continue
            Ncoeff = L * unocc
            deltaC = dX[Nk * Nw**2 + start: Nk * Nw**2 + start + Ncoeff]
            C += deltaC.reshape(unocc, L)
            gram_schmidt(C)
            start += Ncoeff

    self.update()
| 0.002607 |
# pyOCD debugger
# Copyright (c) 2006-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import copy
from .memory_interface import MemoryInterface
from .memory_map import MemoryMap
class Target(MemoryInterface):
class State(Enum):
"""! @brief States a target processor can be in."""
## Core is executing code.
RUNNING = 1
## Core is halted in debug mode.
HALTED = 2
## Core is being held in reset.
RESET = 3
## Core is sleeping due to a wfi or wfe instruction.
SLEEPING = 4
## Core is locked up.
LOCKUP = 5
class SecurityState(Enum):
"""! @brief Security states for a processor with the Security extension."""
## PE is in the Non-secure state.
NONSECURE = 0
## PE is in the Secure state.
SECURE = 1
class ResetType(Enum):
"""! @brief Available reset methods."""
## Hardware reset via the nRESET signal.
HW = 1
## Software reset using the core's default software reset method.
SW = 2
## Software reset using the AIRCR.SYSRESETREQ bit.
SW_SYSRESETREQ = 3
## Software reset the entire system (alias of #SW_SYSRESETREQ).
SW_SYSTEM = SW_SYSRESETREQ
## Software reset using the AIRCR.VECTRESET bit.
#
# v6-M and v8-M targets do not support VECTRESET, so they will fall back to SW_EMULATED.
SW_VECTRESET = 4
## Software reset the core only (alias of #SW_VECTRESET).
SW_CORE = SW_VECTRESET
## Emulated software reset.
SW_EMULATED = 5
class BreakpointType(Enum):
"""! @brief Types of breakpoints."""
## Hardware breakpoint.
HW = 1
## Software breakpoint.
SW = 2
## Auto will select the best type given the address and available breakpoints.
AUTO = 3
class WatchpointType(Enum):
"""! @brief Types of watchpoints."""
## Watchpoint on read accesses.
READ = 1
## Watchpoint on write accesses.
WRITE = 2
## Watchpoint on either read or write accesses.
READ_WRITE = 3
class VectorCatch:
"""! Vector catch option masks.
These constants can be OR'd together to form any combination of vector catch settings.
"""
## Disable vector catch.
NONE = 0
## Trap on HardFault exception.
HARD_FAULT = (1 << 0)
## Trap on BusFault exception.
BUS_FAULT = (1 << 1)
## Trap on MemManage exception.
MEM_FAULT = (1 << 2)
## Trap on fault occurring during exception entry or exit.
INTERRUPT_ERR = (1 << 3)
## Trap on UsageFault exception caused by state information error, such as an undefined
# instruction exception.
STATE_ERR = (1 << 4)
## Trap on UsageFault exception caused by checking error, for example an alignment check error.
CHECK_ERR = (1 << 5)
## Trap on UsageFault exception caused by a failed access to a coprocessor.
COPROCESSOR_ERR = (1 << 6)
## Trap on local reset.
CORE_RESET = (1 << 7)
## Trap SecureFault.
SECURE_FAULT = (1 << 8)
ALL = (HARD_FAULT | BUS_FAULT | MEM_FAULT | INTERRUPT_ERR
| STATE_ERR | CHECK_ERR | COPROCESSOR_ERR | CORE_RESET
| SECURE_FAULT)
class Event(Enum):
"""! Target notification events."""
## Sent after completing the initialisation sequence.
POST_CONNECT = 1
## Sent prior to disconnecting cores and powering down the DP.
PRE_DISCONNECT = 2
## Sent prior to resume or step.
#
# Associated data is a RunType enum.
PRE_RUN = 3
## Sent after a resume or step operation.
#
# For resume, this event will be sent while the target is still running. Use a halt event
# to trap when the target stops running.
#
# Associated data is a RunType enum.
POST_RUN = 4
## Sent prior to a user-invoked halt.
#
# Associated data is a HaltReason enum, which will currently always be HaltReason.USER.
PRE_HALT = 5
## Sent after the target halts.
#
# Associated data is a HaltReason enum.
POST_HALT = 6
## Sent before executing a reset operation.
PRE_RESET = 7
## Sent after the target has been reset.
POST_RESET = 8
## Sent before programming target flash.
PRE_FLASH_PROGRAM = 9
## Sent after target flash has been reprogrammed.
POST_FLASH_PROGRAM = 10
class RunType(Enum):
"""! Run type for run notifications.
An enum of this type is set as the data attribute on PRE_RUN and POST_RUN notifications.
"""
## Target is being resumed.
RESUME = 1
## Target is being stepped one instruction.
STEP = 2
class HaltReason(Enum):
"""! Halt type for halt notifications.
An value of this type is returned from Target.get_halt_reason(). It is also used as the data
attribute on PRE_HALT and POST_HALT notifications.
"""
## Target halted due to user action.
USER = 1
## Target halted because of a halt or step event.
DEBUG = 2
## Breakpoint event.
BREAKPOINT = 3
## DWT watchpoint event.
WATCHPOINT = 4
## Vector catch event.
VECTOR_CATCH = 5
## External debug request.
EXTERNAL = 6
## PMU event. v8.1-M only.
PMU = 7
def __init__(self, session, memory_map=None):
self._session = session
self._delegate = None
# Make a target-specific copy of the memory map. This is safe to do without locking
# because the memory map may not be mutated until target initialization.
self.memory_map = memory_map.clone() if memory_map else MemoryMap()
self._svd_location = None
self._svd_device = None
@property
def session(self):
return self._session
@property
def delegate(self):
return self._delegate
@delegate.setter
def delegate(self, the_delegate):
self._delegate = the_delegate
def delegate_implements(self, method_name):
return (self._delegate is not None) and (hasattr(self._delegate, method_name))
def call_delegate(self, method_name, *args, **kwargs):
if self.delegate_implements(method_name):
return getattr(self._delegate, method_name)(*args, **kwargs)
else:
# The default action is always taken if None is returned.
return None
@property
def svd_device(self):
return self._svd_device
@property
def supported_security_states(self):
raise NotImplementedError()
@property
def core_registers(self):
raise NotImplementedError()
def is_locked(self):
return False
def create_init_sequence(self):
raise NotImplementedError()
def init(self):
raise NotImplementedError()
def disconnect(self, resume=True):
pass
def flush(self):
self.session.probe.flush()
def halt(self):
raise NotImplementedError()
def step(self, disable_interrupts=True, start=0, end=0):
raise NotImplementedError()
def resume(self):
raise NotImplementedError()
def mass_erase(self):
raise NotImplementedError()
def read_core_register(self, id):
raise NotImplementedError()
def write_core_register(self, id, data):
raise NotImplementedError()
def read_core_register_raw(self, reg):
raise NotImplementedError()
def read_core_registers_raw(self, reg_list):
raise NotImplementedError()
    def write_core_register_raw(self, reg, data):
        """Write one core register (raw variant). Abstract -- subclass must override."""
        raise NotImplementedError()
    def write_core_registers_raw(self, reg_list, data_list):
        """Write a list of core registers (raw variant). Abstract -- subclass must override."""
        raise NotImplementedError()
    def find_breakpoint(self, addr):
        """Look up a breakpoint at *addr*. Abstract -- subclass must override."""
        raise NotImplementedError()
    def set_breakpoint(self, addr, type=BreakpointType.AUTO):
        """Install a breakpoint at *addr* of the given type. Abstract -- subclass must override."""
        raise NotImplementedError()
    def get_breakpoint_type(self, addr):
        """Report the type of the breakpoint at *addr*. Abstract -- subclass must override."""
        raise NotImplementedError()
    def remove_breakpoint(self, addr):
        """Remove the breakpoint at *addr*. Abstract -- subclass must override."""
        raise NotImplementedError()
    def set_watchpoint(self, addr, size, type):
        """Install a watchpoint. Abstract -- subclass must override."""
        raise NotImplementedError()
    def remove_watchpoint(self, addr, size, type):
        """Remove a watchpoint. Abstract -- subclass must override."""
        raise NotImplementedError()
    def reset(self, reset_type=None):
        """Reset the target. Abstract -- subclass must override."""
        raise NotImplementedError()
    def reset_and_halt(self, reset_type=None):
        """Reset the target and halt immediately after. Abstract -- subclass must override."""
        raise NotImplementedError()
    def get_state(self):
        """Return the current Target.State (see is_running/is_halted). Abstract."""
        raise NotImplementedError()
    def get_security_state(self):
        """Return the current security state. Abstract -- subclass must override."""
        raise NotImplementedError()
    def get_halt_reason(self):
        """Return the reason the core halted. Abstract -- subclass must override."""
        raise NotImplementedError()
    @property
    def run_token(self):
        """Run token; this base implementation always returns 0."""
        return 0
    def is_running(self):
        """True when get_state() reports Target.State.RUNNING."""
        return self.get_state() == Target.State.RUNNING
    def is_halted(self):
        """True when get_state() reports Target.State.HALTED."""
        return self.get_state() == Target.State.HALTED
    def get_memory_map(self):
        """Return this target's private memory map copy."""
        return self.memory_map
    def set_vector_catch(self, enableMask):
        """Set the vector-catch mask. Abstract -- subclass must override."""
        raise NotImplementedError()
    def get_vector_catch(self):
        """Return the vector-catch mask. Abstract -- subclass must override."""
        raise NotImplementedError()
    def get_target_context(self, core=None):
        """Return a debug context for the given core. Abstract -- subclass must override."""
        raise NotImplementedError()
| 0.007575 |
# -*- coding: utf-8 -*- vim:fileencoding=utf-8:
# vim: tabstop=4:shiftwidth=4:softtabstop=4:expandtab
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.template.defaultfilters import filesizeformat
from flowspec.models import *
from peers.models import *
from accounts.models import *
from ipaddr import *
from flowspec.validators import (
clean_source,
clean_destination,
clean_expires,
clean_route_form
)
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.conf import settings
import datetime
from django.core.mail import send_mail
class UserProfileForm(forms.ModelForm):
    """ModelForm exposing every field of UserProfile."""
    class Meta:
        fields = '__all__'
        model = UserProfile
class RouteForm(forms.ModelForm):
    """ModelForm for Route.

    Field cleaners delegate to the shared validators in flowspec.validators
    (also used by the REST framework); clean() additionally rejects rules that
    duplicate an existing rule under a different name.
    """
    class Meta:
        fields = '__all__'
        model = Route
    def clean_applier(self):
        """Require an applier to be set."""
        applier = self.cleaned_data['applier']
        if applier:
            return self.cleaned_data["applier"]
        else:
            raise forms.ValidationError('This field is required.')
    def clean_source(self):
        # run validator which is used by rest framework too
        # The validator echoes the input back on success and returns an error
        # message string otherwise.
        source = self.cleaned_data['source']
        res = clean_source(
            User.objects.get(pk=self.data['applier']),
            source
        )
        if res != source:
            raise forms.ValidationError(res)
        else:
            return res
    def clean_destination(self):
        """Validate destination against the applier's allowed networks."""
        destination = self.cleaned_data.get('destination')
        res = clean_destination(
            User.objects.get(pk=self.data['applier']),
            destination
        )
        if destination != res:
            raise forms.ValidationError(res)
        else:
            return res
    def clean_expires(self):
        """Validate the expiration date via the shared validator."""
        date = self.cleaned_data['expires']
        res = clean_expires(date)
        if date != res:
            raise forms.ValidationError(res)
        return res
    def clean(self):
        """Cross-field validation: abort on per-field errors, then look for an
        existing rule (visible through the applier's peers) that matches this
        one but carries a different name."""
        if self.errors:
            raise forms.ValidationError(_('Errors in form. Please review and fix them: %s' % ", ".join(self.errors)))
        error = clean_route_form(self.cleaned_data)
        if error:
            raise forms.ValidationError(error)
        # check if same rule exists with other name
        user = self.cleaned_data['applier']
        # Superusers compare against every peer's rules; others only against
        # rules applied by users sharing one of their peers.
        if user.is_superuser:
            peers = Peer.objects.all()
        else:
            peers = user.userprofile.peers.all()
        existing_routes = Route.objects.all()
        existing_routes = existing_routes.filter(applier__userprofile__peers__in=peers)
        name = self.cleaned_data.get('name', None)
        protocols = self.cleaned_data.get('protocol', None)
        source = self.cleaned_data.get('source', None)
        sourceports = self.cleaned_data.get('sourceport', None)
        ports = self.cleaned_data.get('port', None)
        destination = self.cleaned_data.get('destination', None)
        destinationports = self.cleaned_data.get('destinationport', None)
        # NOTE(review): duplicates the 'applier' lookup performed above.
        user = self.cleaned_data.get('applier', None)
        # Progressively narrow existing_routes to rules whose match fields are
        # identical to the submitted ones.
        if source:
            source = IPNetwork(source).compressed
            existing_routes = existing_routes.filter(source=source)
        else:
            existing_routes = existing_routes.filter(source=None)
        if protocols:
            route_pk_list=get_matchingprotocol_route_pks(protocols, existing_routes)
            if route_pk_list:
                existing_routes = existing_routes.filter(pk__in=route_pk_list)
            else:
                existing_routes = existing_routes.filter(protocol=None)
        else:
            existing_routes = existing_routes.filter(protocol=None)
        if sourceports:
            route_pk_list=get_matchingport_route_pks(sourceports, existing_routes)
            if route_pk_list:
                existing_routes = existing_routes.filter(pk__in=route_pk_list)
            else:
                existing_routes = existing_routes.filter(sourceport=None)
        if destinationports:
            route_pk_list=get_matchingport_route_pks(destinationports, existing_routes)
            if route_pk_list:
                existing_routes = existing_routes.filter(pk__in=route_pk_list)
            else:
                existing_routes = existing_routes.filter(destinationport=None)
        if ports:
            route_pk_list=get_matchingport_route_pks(ports, existing_routes)
            if route_pk_list:
                existing_routes = existing_routes.filter(pk__in=route_pk_list)
            else:
                existing_routes = existing_routes.filter(port=None)
        # A conflict is a remaining rule, differently named, whose destination
        # prefix contains -- or is contained by -- the submitted one.
        for route in existing_routes:
            if name != route.name:
                existing_url = reverse('edit-route', args=[route.name])
                if IPNetwork(destination) in IPNetwork(route.destination) or IPNetwork(route.destination) in IPNetwork(destination):
                    raise forms.ValidationError('Found an exact %s rule, %s with destination prefix %s<br>To avoid overlapping try editing rule <a href=\'%s\'>%s</a>' % (route.status, route.name, route.destination, existing_url, route.name))
        return self.cleaned_data
class ThenPlainForm(forms.ModelForm):
    """Form for ThenAction: only 'rate-limit' actions with an integer value of
    at least 50 kbps are accepted."""
    # action = forms.CharField(initial='rate-limit')
    class Meta:
        fields = '__all__'
        model = ThenAction

    def clean_action_value(self):
        """Validate that action_value is an integer rate limit >= 50 kbps.

        The previous implementation wrapped the whole check in a bare
        ``except:`` which also swallowed the deliberately raised
        "cannot be < 50kbps" ValidationError and replaced it with a wrong
        message; the handling is now explicit.
        """
        action_value = self.cleaned_data['action_value']
        if not action_value:
            raise forms.ValidationError(_('Cannot be empty'))
        try:
            rate = int(action_value)
        except (TypeError, ValueError):
            raise forms.ValidationError(_('Rate-limiting should be an integer'))
        if rate < 50:
            raise forms.ValidationError(_('Rate-limiting cannot be < 50kbps'))
        # Return the original (string) form of the value, as before.
        return "%s" % self.cleaned_data["action_value"]

    def clean_action(self):
        """Only the 'rate-limit' action is permitted."""
        action = self.cleaned_data['action']
        if action != 'rate-limit':
            raise forms.ValidationError(_('Cannot select something other than rate-limit'))
        return self.cleaned_data["action"]
class PortPlainForm(forms.ModelForm):
    """Form for MatchPort: accepts integer port numbers in [0, 65535]."""
    # action = forms.CharField(initial='rate-limit')
    class Meta:
        fields = '__all__'
        model = MatchPort

    def clean_port(self):
        """Validate that port parses as an integer in the 16-bit range.

        Replaces the earlier raise-inside-try / bare-except ladder with
        straight-line checks; the error messages are unchanged.
        """
        port = self.cleaned_data['port']
        if not port:
            raise forms.ValidationError(_('Cannot be empty'))
        try:
            value = int(port)
        except (TypeError, ValueError):
            raise forms.ValidationError(_('Port should be an integer'))
        if value > 65535 or value < 0:
            raise forms.ValidationError(_('Port should be < 65535 and >= 0'))
        # Return the original (string) form of the value, as before.
        return "%s" % self.cleaned_data["port"]
def value_list_to_list(valuelist):
    """Flatten an iterable of 1-tuples (e.g. a Django ``values_list()``) into a
    plain list of their first elements."""
    return [item[0] for item in valuelist]
def get_matchingport_route_pks(portlist, routes):
    """Return pks of *routes* whose destination-port list equals *portlist*.

    NOTE(review): RouteForm.clean() calls this for source ports and plain
    ports as well, yet the comparison below always reads
    route.destinationport -- confirm this is intentional.
    """
    route_pk_list = []
    ports_value_list = value_list_to_list(portlist.values_list('port').order_by('port'))
    for route in routes:
        rsp = value_list_to_list(route.destinationport.all().values_list('port').order_by('port'))
        # Match only non-empty, identical ordered port lists.
        if rsp and rsp == ports_value_list:
            route_pk_list.append(route.pk)
    return route_pk_list
def get_matchingprotocol_route_pks(protocolist, routes):
    """Return pks of *routes* whose protocol list equals *protocolist*."""
    wanted = value_list_to_list(protocolist.values_list('protocol').order_by('protocol'))
    matching = []
    for candidate in routes:
        found = value_list_to_list(candidate.protocol.all().values_list('protocol').order_by('protocol'))
        # Only non-empty, identical ordered protocol lists count as a match.
        if found and found == wanted:
            matching.append(candidate.pk)
    return matching
| 0.00303 |
"""
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
---
*** The McDiarmid Drift Detection Method - Arithmetic Scheme (MDDM_A) Implementation ***
Paper: Pesaranghader, Ali, et al. "McDiarmid Drift Detection Method for Evolving Data Streams."
Published in: International Joint Conference on Neural Network (IJCNN 2018)
URL: https://arxiv.org/abs/1710.02030
"""
import math
from dictionary.tornado_dictionary import TornadoDic
from drift_detection.detector import SuperDetector
class MDDM_A(SuperDetector):
    """The McDiarmid Drift Detection Method - Arithmetic Scheme (MDDM_A) class.

    Keeps a sliding window of prediction results weighted by an arithmetic
    progression and signals drift when the weighted mean drops below the
    running maximum by more than the McDiarmid bound.
    """
    DETECTOR_NAME = TornadoDic.MDDM_A
    def __init__(self, n=100, difference=0.01, delta=0.000001):
        """Configure the detector.

        :param n: size of the sliding window of prediction results
        :param difference: arithmetic step 'd' used to weight window entries
        :param delta: confidence parameter of the McDiarmid bound
        """
        super().__init__()
        self.win = []
        self.n = n
        self.difference = difference
        self.delta = delta
        # McDiarmid error bound, computed once from the weight distribution.
        self.e = math.sqrt(0.5 * self.cal_sigma() * (math.log(1 / self.delta, math.e)))
        self.u_max = 0
        self.DETECTOR_NAME += "." + str(n)
    def run(self, pr):
        """Feed one prediction result *pr*; returns (False, drift_detected).

        The first element of the pair is always False here (this scheme
        raises no separate warning signal).
        """
        drift_status = False
        if len(self.win) == self.n:
            self.win.pop(0)
        self.win.append(pr)
        if len(self.win) == self.n:
            u = self.cal_w_sigma()
            # Track the largest weighted mean seen; a drop bigger than the
            # bound 'e' indicates drift.
            self.u_max = u if u > self.u_max else self.u_max
            drift_status = True if (self.u_max - u > self.e) else False
        return False, drift_status
    def reset(self):
        """Clear the window and the running maximum."""
        super().reset()
        self.win.clear()
        self.u_max = 0
    def cal_sigma(self):
        """Return the sum of squared normalized arithmetic weights."""
        sum_, sigma = 0, 0
        for i in range(self.n):
            sum_ += (1 + i * self.difference)
        for i in range(self.n):
            sigma += math.pow((1 + i * self.difference) / sum_, 2)
        return sigma
    def cal_w_sigma(self):
        """Return the arithmetically weighted mean of the current window."""
        total_sum, win_sum = 0, 0
        for i in range(self.n):
            total_sum += 1 + i * self.difference
            win_sum += self.win[i] * (1 + i * self.difference)
        return win_sum / total_sum
    def get_settings(self):
        """Return [identifier, LaTeX-formatted parameter summary]."""
        settings = [str(self.n) + "." + str(self.delta),
                    "$n$:" + str(self.n) + ", " +
                    "$d$:" + str(self.difference) + ", " +
                    "$\delta$:" + str(self.delta).upper()]
        return settings
| 0.002068 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: pip
short_description: Manages Python library dependencies.
description:
- "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
or C(requirements)."
version_added: "0.7"
options:
name:
description:
- The name of a Python library to install or the url of the remote package.
- As of 2.2 you can supply a list of names.
required: false
default: null
version:
description:
- The version number to install of the Python library specified in the I(name) parameter
required: false
default: null
requirements:
description:
- The path to a pip requirements file, which should be local to the remote system.
File can be specified as a relative path if using the chdir option.
required: false
default: null
virtualenv:
description:
- An optional path to a I(virtualenv) directory to install into.
It cannot be specified together with the 'executable' parameter
(added in 2.1).
If the virtualenv does not exist, it will be created before installing
packages. The optional virtualenv_site_packages, virtualenv_command,
and virtualenv_python options affect the creation of the virtualenv.
required: false
default: null
virtualenv_site_packages:
version_added: "1.0"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command or a pathname to the command to create the virtual
environment with. For example C(pyvenv), C(virtualenv),
C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
required: false
default: virtualenv
virtualenv_python:
version_added: "2.0"
description:
- The Python executable used for creating the virtual environment.
For example C(python3.5), C(python2.7). When not specified, the
Python version used to run the ansible module is used. This parameter
should not be used when C(virtualenv_command) is using C(pyvenv) or
the C(-m venv) module.
required: false
default: null
state:
description:
- The state of module
- The 'forcereinstall' option is only available in Ansible 2.1 and above.
required: false
default: present
choices: [ "present", "absent", "latest", "forcereinstall" ]
extra_args:
description:
- Extra arguments passed to pip.
required: false
default: null
version_added: "1.0"
editable:
description:
- Pass the editable flag.
required: false
default: false
version_added: "2.0"
chdir:
description:
- cd into this directory before running the command
version_added: "1.3"
required: false
default: null
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run pip for a specific version of Python installed in the system. For
example C(pip-3.3), if there are both Python 2.7 and 3.3 installations
in the system and you want to run pip for the Python 3.3 installation.
It cannot be specified together with the 'virtualenv' parameter (added in 2.1).
By default, it will take the appropriate version for the python interpreter
use by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2.
version_added: "1.3"
required: false
default: null
umask:
description:
- The system umask to apply before installing the pip package. This is
useful, for example, when installing on systems that have a very
restrictive umask by default (e.g., 0077) and you want to pip install
packages which are to be used by all users. Note that this requires you
to specify desired umask mode in octal, with a leading 0 (e.g., 0077).
version_added: "2.1"
required: false
default: null
notes:
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be
installed on the remote host if the virtualenv parameter is specified and
the virtualenv needs to be created.
- By default, this module will use the appropriate version of pip for the
interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise)
requirements: [ "virtualenv", "pip" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Install (Bottle) python package.
- pip:
name: bottle
# Install (Bottle) python package on version 0.11.
- pip:
name: bottle
version: 0.11
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
- pip:
name: svn+http://myrepo/svn/MyApp#egg=MyApp
# Install MyApp using one of the remote protocols (bzr+,hg+,git+).
- pip:
name: git+http://myrepo/app/MyApp
# Install (MyApp) from local tarball
- pip:
name: file:///path/to/MyApp.tar.gz
# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_site_packages: yes
# Install (Bottle) into the specified (virtualenv), using Python 2.7
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_command: virtualenv-2.7
# Install (Bottle) within a user home directory.
- pip:
name: bottle
extra_args: --user
# Install specified python requirements.
- pip:
requirements: /my_app/requirements.txt
# Install specified python requirements in indicated (virtualenv).
- pip:
requirements: /my_app/requirements.txt
virtualenv: /my_app/venv
# Install specified python requirements and custom Index URL.
- pip:
requirements: /my_app/requirements.txt
extra_args: -i https://example.com/pypi/simple
# Install (Bottle) for Python 3.3 specifically, using the 'pip-3.3' executable.
- pip:
name: bottle
executable: pip-3.3
# Install (Bottle), forcing reinstallation if it's already installed
- pip:
name: bottle
state: forcereinstall
# Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it)
- pip:
name: bottle
umask: 0022
become: True
'''
import os
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule, is_executable
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
#: Python one-liners to be run at the command line that will determine the
# installed version for these special libraries. These are libraries that
# don't end up in the output of pip freeze.
# One-liners (run via 'python -c') that print each special package's version.
_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
                             'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
def _get_cmd_options(module, cmd):
    """Return the '--' option strings advertised by *cmd*'s --help output.

    Fails the module run if the help command cannot be executed.
    """
    help_cmd = cmd + " --help"
    rc, stdout, stderr = module.run_command(help_cmd)
    if rc != 0:
        module.fail_json(msg="Could not get output from %s: %s" % (help_cmd, stdout + stderr))
    return [word for word in stdout.strip().split() if word.startswith('--')]
def _get_full_name(name, version=None):
if version is None:
resp = name
else:
resp = name + '==' + version
return resp
def _get_packages(module, pip, chdir):
    """Run pip to enumerate installed packages.

    Returns (command, stdout, stderr). Tries 'pip list --format=freeze' first
    and falls back to 'pip freeze' for pip versions that lack 'list'.
    """
    c_locale = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    cmd = '%s list --format=freeze' % pip
    rc, out, err = module.run_command(cmd, cwd=chdir, environ_update=c_locale)
    if rc == 0:
        return (cmd, out, err)
    # Older pip has no 'list' subcommand; fall back to 'pip freeze'.
    cmd = '%s freeze' % pip
    rc, out, err = module.run_command(cmd, cwd=chdir)
    if rc != 0:
        _fail(module, cmd, out, err)
    return (cmd, out, err)
def _is_present(name, version, installed_pkgs, pkg_command):
'''Return whether or not package is installed.'''
for pkg in installed_pkgs:
if '==' in pkg:
pkg_name, pkg_version = pkg.split('==')
else:
continue
if pkg_name == name and (version is None or version == pkg_version):
return True
return False
def _get_pip(module, env=None, executable=None):
    """Locate the pip executable to use.

    An absolute *executable* path wins outright; a bare *executable* name
    replaces the default candidate list. Otherwise candidates are searched on
    PATH, or inside *env*/bin when a virtualenv is given. The module run is
    failed if nothing suitable is found.
    """
    # Older pip only installed under the "/usr/bin/pip" name. Many Linux
    # distros install it there.
    # By default, we try to use pip required for the current python
    # interpreter, so people can use pip to install modules dependencies
    candidate_pip_basenames = ('pip2', 'pip')
    if PY3:
        # pip under python3 installs the "/usr/bin/pip3" name
        candidate_pip_basenames = ('pip3',)
    pip = None
    if executable is not None:
        if os.path.isabs(executable):
            pip = executable
        else:
            # If you define your own executable that executable should be the only candidate.
            # As noted in the docs, executable doesn't work with virtualenvs.
            candidate_pip_basenames = (executable,)
    if pip is None:
        if env is None:
            opt_dirs = []
            for basename in candidate_pip_basenames:
                pip = module.get_bin_path(basename, False, opt_dirs)
                if pip is not None:
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find any of %s to use. pip'
                                     ' needs to be installed.' % ', '.join(candidate_pip_basenames))
        else:
            # If we're using a virtualenv we must use the pip from the
            # virtualenv
            venv_dir = os.path.join(env, 'bin')
            candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
            for basename in candidate_pip_basenames:
                candidate = os.path.join(venv_dir, basename)
                if os.path.exists(candidate) and is_executable(candidate):
                    pip = candidate
                    break
            else:
                # For-else: Means that we did not break out of the loop
                # (therefore, that pip was not found)
                module.fail_json(msg='Unable to find pip in the virtualenv,'
                                     ' %s, under any of these names: %s. Make sure pip is'
                                     ' present in the virtualenv.' % (env,
                                                                      ', '.join(candidate_pip_basenames)))
    return pip
def _fail(module, cmd, out, err):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg)
def _get_package_info(module, package, env=None):
    """Look up the version of a package that 'pip freeze' cannot report.

    pip and setuptools fall into this category.

    :returns: '<package>==<version>' when the package is importable by the
        selected python, otherwise None.
    """
    opt_dirs = ['%s/bin' % env] if env else []
    python_bin = module.get_bin_path('python', False, opt_dirs)
    if python_bin is None:
        return None
    rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
    if rc:
        return None
    return '%s==%s' % (package, out.strip())
def main():
    """Entry point of the Ansible pip module.

    Builds the pip command line from the module parameters, optionally creates
    a virtualenv first, honors check mode and umask, and reports whether the
    run changed anything.
    """
    # Map desired state to the pip sub-command that realizes it.
    state_map = dict(
        present='install',
        absent='uninstall -y',
        latest='install -U',
        forcereinstall='install -U --force-reinstall',
    )
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=state_map.keys()),
            name=dict(type='list'),
            version=dict(type='str'),
            requirements=dict(type='str'),
            virtualenv=dict(type='path'),
            virtualenv_site_packages=dict(default=False, type='bool'),
            virtualenv_command=dict(default='virtualenv', type='path'),
            virtualenv_python=dict(type='str'),
            use_mirrors=dict(default=True, type='bool'),
            extra_args=dict(type='str'),
            editable=dict(default=False, type='bool'),
            chdir=dict(type='path'),
            executable=dict(type='path'),
            umask=dict(type='str'),
        ),
        required_one_of=[['name', 'requirements']],
        mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
        supports_check_mode=True
    )
    state = module.params['state']
    name = module.params['name']
    version = module.params['version']
    requirements = module.params['requirements']
    extra_args = module.params['extra_args']
    virtualenv_python = module.params['virtualenv_python']
    chdir = module.params['chdir']
    umask = module.params['umask']
    # umask arrives as a string and must be octal (e.g. "0022").
    if umask and not isinstance(umask, int):
        try:
            umask = int(umask, 8)
        except Exception:
            module.fail_json(msg="umask must be an octal integer",
                             details=to_native(sys.exc_info()[1]))
    old_umask = None
    if umask is not None:
        old_umask = os.umask(umask)
    try:
        if state == 'latest' and version is not None:
            module.fail_json(msg='version is incompatible with state=latest')
        if chdir is None:
            # this is done to avoid permissions issues with privilege escalation and virtualenvs
            chdir = tempfile.gettempdir()
        err = ''
        out = ''
        env = module.params['virtualenv']
        if env:
            # Create the virtualenv when it does not exist yet.
            if not os.path.exists(os.path.join(env, 'bin', 'activate')):
                if module.check_mode:
                    module.exit_json(changed=True)
                cmd = module.params['virtualenv_command']
                if os.path.basename(cmd) == cmd:
                    cmd = module.get_bin_path(cmd, True)
                if module.params['virtualenv_site_packages']:
                    cmd += ' --system-site-packages'
                else:
                    cmd_opts = _get_cmd_options(module, cmd)
                    if '--no-site-packages' in cmd_opts:
                        cmd += ' --no-site-packages'
                # -p is a virtualenv option, not compatible with pyenv or venv
                # this if validates if the command being used is not any of them
                if not any(ex in module.params['virtualenv_command'] for ex in ('pyvenv', '-m venv')):
                    if virtualenv_python:
                        cmd += ' -p%s' % virtualenv_python
                    elif PY3:
                        # Ubuntu currently has a patch making virtualenv always
                        # try to use python2. Since Ubuntu16 works without
                        # python2 installed, this is a problem. This code mimics
                        # the upstream behaviour of using the python which invoked
                        # virtualenv to determine which python is used inside of
                        # the virtualenv (when none are specified).
                        cmd += ' -p%s' % sys.executable
                # if venv or pyvenv are used and virtualenv_python is defined, then
                # virtualenv_python is ignored, this has to be acknowledged
                elif module.params['virtualenv_python']:
                    module.fail_json(
                        msg='virtualenv_python should not be used when'
                            ' using the venv module or pyvenv as virtualenv_command'
                    )
                cmd = "%s %s" % (cmd, env)
                rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
                out += out_venv
                err += err_venv
                if rc != 0:
                    _fail(module, cmd, out, err)
        pip = _get_pip(module, env, module.params['executable'])
        cmd = '%s %s' % (pip, state_map[state])
        # If there's a virtualenv we want things we install to be able to use other
        # installations that exist as binaries within this virtualenv. Example: we
        # install cython and then gevent -- gevent needs to use the cython binary,
        # not just a python package that will be found by calling the right python.
        # So if there's a virtualenv, we add that bin/ to the beginning of the PATH
        # in run_command by setting path_prefix here.
        path_prefix = None
        if env:
            path_prefix = "/".join(pip.split('/')[:-1])
        # Automatically apply -e option to extra_args when source is a VCS url. VCS
        # includes those beginning with svn+, git+, hg+ or bzr+
        has_vcs = False
        if name:
            for pkg in name:
                if bool(pkg and re.match(r'(svn|git|hg|bzr)\+', pkg)):
                    has_vcs = True
                    break
        if module.params['editable']:
            args_list = [] # used if extra_args is not used at all
            if extra_args:
                args_list = extra_args.split(' ')
            if '-e' not in args_list:
                args_list.append('-e')
                # Ok, we will reconstruct the option string
                extra_args = ' '.join(args_list)
        if extra_args:
            cmd += ' %s' % extra_args
        if name:
            for pkg in name:
                cmd += ' %s' % _get_full_name(pkg, version)
        else:
            if requirements:
                cmd += ' -r %s' % requirements
        if module.check_mode:
            # In check mode only report whether something *would* change.
            if extra_args or requirements or state == 'latest' or not name:
                module.exit_json(changed=True)
            pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
            out += out_pip
            err += err_pip
            changed = False
            if name:
                pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]
                if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
                    # Older versions of pip (pre-1.3) do not have pip list.
                    # pip freeze does not list setuptools or pip in its output
                    # So we need to get those via a specialcase
                    for pkg in ('setuptools', 'pip'):
                        if pkg in name:
                            formatted_dep = _get_package_info(module, pkg, env)
                            if formatted_dep is not None:
                                pkg_list.append(formatted_dep)
                                out += '%s\n' % formatted_dep
                for pkg in name:
                    is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
                    if (state == 'present' and not is_present) or (state == 'absent' and is_present):
                        changed = True
                        break
            module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
        out_freeze_before = None
        if requirements or has_vcs:
            # Snapshot installed packages so 'changed' can be computed by diffing.
            _, out_freeze_before, _ = _get_packages(module, pip, chdir)
        rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
        out += out_pip
        err += err_pip
        if rc == 1 and state == 'absent' and \
                ('not installed' in out_pip or 'not installed' in err_pip):
            pass # rc is 1 when attempting to uninstall non-installed package
        elif rc != 0:
            _fail(module, cmd, out, err)
        if state == 'absent':
            changed = 'Successfully uninstalled' in out_pip
        else:
            if out_freeze_before is None:
                changed = 'Successfully installed' in out_pip
            else:
                _, out_freeze_after, _ = _get_packages(module, pip, chdir)
                changed = out_freeze_before != out_freeze_after
        module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
                         state=state, requirements=requirements, virtualenv=env,
                         stdout=out, stderr=err)
    finally:
        if old_umask is not None:
            os.umask(old_umask)
| 0.002441 |
from AppKit import NSLevelIndicator, NSLevelIndicatorCell, NSTickMarkAbove, NSTickMarkBelow, \
NSDiscreteCapacityLevelIndicatorStyle, NSContinuousCapacityLevelIndicatorStyle, \
NSRatingLevelIndicatorStyle, NSRelevancyLevelIndicatorStyle, NSImage
from vanilla.vanillaBase import VanillaBaseControl
# This control is available in OS 10.4+.
# Cause a NameError if in an earlier OS.
NSLevelIndicator
# Map vanilla's tick-mark position names to the AppKit constants.
_tickPositionMap = {
    "above": NSTickMarkAbove,
    "below": NSTickMarkBelow,
}
# Map vanilla's style names to the AppKit level-indicator style constants.
_levelIndicatorStyleMap = {
    "discrete": NSDiscreteCapacityLevelIndicatorStyle,
    "continuous": NSContinuousCapacityLevelIndicatorStyle,
    "rating": NSRatingLevelIndicatorStyle,
    "relevancy": NSRelevancyLevelIndicatorStyle,
}
class LevelIndicator(VanillaBaseControl):
"""
A control which shows a value on a linear scale.::
from vanilla import *
class LevelIndicatorDemo(object):
def __init__(self):
self.w = Window((200, 68))
self.w.discreteIndicator = LevelIndicator(
(10, 10, -10, 18), callback=self.levelIndicatorCallback)
self.w.continuousIndicator = LevelIndicator(
(10, 40, -10, 18), style="continuous",
callback=self.levelIndicatorCallback)
self.w.open()
def levelIndicatorCallback(self, sender):
print("level indicator edit!", sender.get())
LevelIndicatorDemo()
**posSize** Tuple of form *(left, top, width, height)* representing the position
and size of the level indicator.
+-------------------------------+
| **Standard Dimensions()** |
+-------------------------------+
| *discrete without ticks* |
+-------------------------------+
| H | 18 |
+-------------------------------+
| *discrete with minor ticks* |
+-------------------------------+
| H | 22 |
+-------------------------------+
| *discrete with major ticks* |
+-------------------------------+
| H | 25 |
+-------------------------------+
| *continuous without ticks* |
+-------------------------------+
| H | 16 |
+-------------------------------+
| *continuous with minor ticks* |
+-------------------------------+
| H | 20 |
+-------------------------------+
| *continuous with major ticks* |
+-------------------------------+
| H | 23 |
+-------------------------------+
**style** The style of the level indicator. The options are:
+--------------+-------------------+
| "continuous" | A continuous bar. |
+--------------+-------------------+
| "discrete" | A segmented bar. |
+--------------+-------------------+
**value** The initial value of the level indicator.
**minValue** The minimum value allowed by the level indicator.
**maxValue** The maximum value allowed by the level indicator.
**warningValue** The value at which the filled portions of the
level indicator should display the warning color.
**criticalValue** The value at which the filled portions of the
level indicator should display the critical color.
**tickMarkPosition** The position of the tick marks in relation
to the level indicator. The options are:
+---------+
| "above" |
+---------+
| "below" |
+---------+
**minorTickMarkCount** The number of minor tick marcks to be displayed
on the level indicator. If *None* is given, no minor tick marks will be displayed.
**majorTickMarkCount** The number of major tick marcks to be displayed on the level
indicator. If *None* is given, no major tick marks will be displayed.
**callback** The method to be called when the level indicator has been edited.
If no callback is given, the level indicator will not be editable.
"""
nsLevelIndicatorClass = NSLevelIndicator
def __init__(self, posSize, style="discrete",
value=5, minValue=0, maxValue=10, warningValue=None, criticalValue=None,
tickMarkPosition=None, minorTickMarkCount=None, majorTickMarkCount=None,
callback=None):
self._setupView(self.nsLevelIndicatorClass, posSize, callback=callback)
self._nsObject.cell().setLevelIndicatorStyle_(_levelIndicatorStyleMap[style])
self._nsObject.setMinValue_(minValue)
self._nsObject.setMaxValue_(maxValue)
self._nsObject.setFloatValue_(value)
if warningValue is not None:
self._nsObject.setWarningValue_(warningValue)
if criticalValue is not None:
self._nsObject.setCriticalValue_(criticalValue)
if tickMarkPosition is not None:
self._nsObject.setTickMarkPosition_(_tickPositionMap[tickMarkPosition])
if minorTickMarkCount is not None:
self._nsObject.setNumberOfTickMarks_(minorTickMarkCount)
if majorTickMarkCount is not None:
self._nsObject.setNumberOfMajorTickMarks_(majorTickMarkCount)
if callback is None:
self._nsObject.setEnabled_(False)
def getNSLevelIndicator(self):
"""
Return the *NSLevelIndicator* that this object wraps.
"""
return self._nsObject
    def set(self, value):
        """
        Set the value of the level indicator.

        The value is passed to the underlying view's ``setFloatValue_``,
        so it is stored as a float.
        """
        self._nsObject.setFloatValue_(value)
    def get(self):
        """
        Get the current value of the level indicator (as a float).
        """
        return self._nsObject.floatValue()
    def setMinValue(self, value):
        """
        Set the minimum value allowed by the level indicator.
        """
        self._nsObject.setMinValue_(value)
    def getMinValue(self):
        """
        Get the minimum value allowed by the level indicator.
        """
        return self._nsObject.minValue()
    def setMaxValue(self, value):
        """
        Set the maximum value allowed by the level indicator.
        """
        self._nsObject.setMaxValue_(value)
    def getMaxValue(self):
        """
        Get the maximum value allowed by the level indicator.
        """
        return self._nsObject.maxValue()
    def setWarningValue(self, value):
        """
        Set the warning value of the level indicator (the value at which
        the filled portion displays the warning color).
        """
        self._nsObject.setWarningValue_(value)
def getWarningValue(self, value):
"""
Get the warning value of the level indicator.
"""
return self._nsObject.warningValue()
    def setCriticalValue(self, value):
        """
        Set the critical value of the level indicator (the value at which
        the filled portion displays the critical color).
        """
        self._nsObject.setCriticalValue_(value)
def getCriticalValue(self, value):
"""
Get the critical value of the level indicator.
"""
return self._nsObject.criticalValue()
def LevelIndicatorListCell(style="discrete",
        minValue=0, maxValue=10, warningValue=None, criticalValue=None,
        imagePath=None, imageNamed=None, imageObject=None):
    """
    An object that displays a level indicator in a List column.

    **This object should only be used in the *columnDescriptions* argument
    during the construction of a List.**::

        from vanilla import *

        class LevelIndicatorListCellDemo(object):

            def __init__(self):
                self.w = Window((340, 140))
                items = [
                    {"discrete": 3, "continuous": 4, "rating": 1, "relevancy": 9},
                    {"discrete": 8, "continuous": 3, "rating": 5, "relevancy": 5},
                    {"discrete": 3, "continuous": 7, "rating": 3, "relevancy": 4},
                    {"discrete": 2, "continuous": 5, "rating": 4, "relevancy": 7},
                    {"discrete": 6, "continuous": 9, "rating": 3, "relevancy": 2},
                    {"discrete": 4, "continuous": 0, "rating": 6, "relevancy": 8},
                ]
                columnDescriptions = [
                    {"title": "discrete",
                     "cell": LevelIndicatorListCell(style="discrete", warningValue=7, criticalValue=9)},
                    {"title": "continuous",
                     "cell": LevelIndicatorListCell(style="continuous", warningValue=7, criticalValue=9)},
                    {"title": "rating",
                     "cell": LevelIndicatorListCell(style="rating", maxValue=6)},
                    {"title": "relevancy",
                     "cell": LevelIndicatorListCell(style="relevancy")},
                ]
                self.w.list = List((0, 0, -0, -0), items=items,
                    columnDescriptions=columnDescriptions)
                self.w.open()

        LevelIndicatorListCellDemo()

    **style** The style of the level indicator. The options are:

    +--------------+-----------------------------------------+
    | "continuous" | A continuous bar.                       |
    +--------------+-----------------------------------------+
    | "discrete"   | A segmented bar.                        |
    +--------------+-----------------------------------------+
    | "rating"     | A row of stars. Similar to the rating   |
    |              | indicator in iTunes.                    |
    +--------------+-----------------------------------------+
    | "relevancy"  | A row of lines. Similar to the search   |
    |              | result relevancy indicator in Mail.     |
    +--------------+-----------------------------------------+

    **minValue** The minimum value allowed by the level indicator.

    **maxValue** The maximum value allowed by the level indicator.

    **warningValue** The value at which the filled portions of the
    level indicator should display the warning color. Applies only to
    discrete and continuous level indicators.

    **criticalValue** The value at which the filled portions of the
    level indicator should display the critical color. Applies only to
    discrete and continuous level indicators.

    **imagePath** A file path to an image to display in the cell.

    **imageNamed** The name of a registered *NSImage* to display in
    the cell.

    **imageObject** An *NSImage* object to display in the cell.

    Only one of *imagePath*, *imageNamed* or *imageObject* should be
    given; they are checked in that order.
    """
    cell = NSLevelIndicatorCell.alloc().init()
    cell.setLevelIndicatorStyle_(_levelIndicatorStyleMap[style])
    cell.setMinValue_(minValue)
    cell.setMaxValue_(maxValue)
    if warningValue is not None:
        cell.setWarningValue_(warningValue)
    if criticalValue is not None:
        cell.setCriticalValue_(criticalValue)
    # Resolve the optional image from whichever source was provided.
    image = None
    if imagePath is not None:
        image = NSImage.alloc().initWithContentsOfFile_(imagePath)
    elif imageNamed is not None:
        image = NSImage.imageNamed_(imageNamed)
    elif imageObject is not None:
        image = imageObject
    # Bug fix: the original guard tested `imageObject is not None`, so
    # images supplied via imagePath or imageNamed were never applied and
    # `image` could even be referenced while unbound.
    if image is not None:
        cell.setImage_(image)
    return cell
| 0.002315 |
# Word tables for spelling out numbers. Index 0 (and index 1 for `tens`)
# is empty because those positions have no word of their own: zero is
# silent in compounds and 10-19 come from the `teens` table.
single = 'X one two three four five six seven eight nine'.split()
single[0] = ''
teens = 'ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen'.split()
tens = 'X X twenty thirty forty fifty sixty seventy eighty ninety'.split()
tens[0] = ''
tens[1] = ''


def getNumWord(num):
    """Return *num* spelled out in British English with no spaces or hyphens.

    Examples: 342 -> 'threehundredandfortytwo', 115 -> 'onehundredandfifteen'.

    :param num: an integer in the inclusive range 0..1000; 0 yields ''.
    :returns: the spelled-out number as a string.
    :raises AssertionError: if *num* is outside 0..1000.
    """
    # Fixed: the message used to claim the range was 1..1000 although the
    # condition (and single[0] == '') deliberately admits 0.
    assert 0 <= num <= 1000, str(num) + ' is not between 0 and 1000'
    if num == 1000:
        return 'onethousand'
    # Dispatch on magnitude directly instead of on len(str(num)).
    if num >= 100:
        tensPart = getNumWord(num % 100)
        if tensPart:
            return single[num // 100] + 'hundredand' + tensPart
        return single[num // 100] + 'hundred'
    if num >= 20:
        return tens[num // 10] + single[num % 10]
    if num >= 10:
        return teens[num % 10]
    return single[num]
# Project Euler 17: count the letters used when writing out 1..1000.
# Sanity checks against two worked examples first.
assert len(getNumWord(342)) == 23
assert len(getNumWord(115)) == 20
words = [getNumWord(n) for n in range(1, 1001)]
print(len(''.join(words)))
| 0.003813 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .health_evaluation import HealthEvaluation
class ReplicasHealthEvaluation(HealthEvaluation):
    """Health evaluation for the replicas of a partition.

    Carries one health evaluation per unhealthy replica that contributed
    to the current aggregated health state. Can be returned when partition
    health is evaluated and the aggregated state is Error or Warning.

    :param aggregated_health_state: Possible values include: 'Invalid',
     'Ok', 'Warning', 'Error', 'Unknown'
    :type aggregated_health_state: str
    :param description: Summary of the health evaluation process.
    :type description: str
    :param Kind: Polymorphic Discriminator
    :type Kind: str
    :param max_percent_unhealthy_replicas_per_partition: Maximum allowed
     percentage of unhealthy replicas per partition, taken from the
     ApplicationHealthPolicy.
    :type max_percent_unhealthy_replicas_per_partition: int
    :param total_count: Total number of replicas in the partition as
     reported by the health store.
    :type total_count: long
    :param unhealthy_evaluations: The unhealthy ReplicaHealthEvaluation
     entries that led to the aggregated health state.
    :type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
     <azure.servicefabric.models.HealthEvaluationWrapper>`
    """

    _validation = {
        'Kind': {'required': True},
    }

    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'description': {'key': 'Description', 'type': 'str'},
        'Kind': {'key': 'Kind', 'type': 'str'},
        'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'},
        'total_count': {'key': 'TotalCount', 'type': 'long'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
    }

    def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_replicas_per_partition=None, total_count=None, unhealthy_evaluations=None):
        super(ReplicasHealthEvaluation, self).__init__(
            aggregated_health_state=aggregated_health_state,
            description=description)
        # Fix the polymorphic discriminator, then store the replica-specific
        # evaluation data.
        self.Kind = 'Replicas'
        self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition
        self.total_count = total_count
        self.unhealthy_evaluations = unhealthy_evaluations
| 0.002325 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2011 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
# Zato
from zato.admin.web.forms.definition.jms_wmq import CreateForm, EditForm
from zato.admin.web.views import CreateEdit, Delete as _Delete, Index as _Index
from zato.common.odb.model import ConnDefWMQ
# Module-level logger for these admin views.
logger = logging.getLogger(__name__)
class Index(_Index):
    """GET view listing JMS WebSphere MQ connection definitions."""
    method_allowed = 'GET'
    url_name = 'def-jms-wmq'
    template = 'zato/definition/jms_wmq.html'
    service_name = 'zato.definition.jms-wmq.get-list'
    output_class = ConnDefWMQ

    class SimpleIO(_Index.SimpleIO):
        # Declarative I/O contract consumed by the Zato admin framework.
        input_required = ('cluster_id',)
        output_required = ('id', 'name', 'host', 'port', 'queue_manager', 'channel', 'cache_open_send_queues',
            'cache_open_receive_queues', 'use_shared_connections', 'ssl', 'ssl_cipher_spec', 'ssl_key_repository', 'needs_mcd', 'max_chars_printed')
        output_repeated = True

    def handle(self):
        # Forms rendered alongside the listing; the edit form's fields are
        # prefixed so both forms can coexist on the same page.
        return {
            'create_form': CreateForm(),
            'edit_form': EditForm(prefix='edit'),
        }
class _CreateEdit(CreateEdit):
    """Shared base for the create and edit POST views below."""
    method_allowed = 'POST'

    class SimpleIO(CreateEdit.SimpleIO):
        # Declarative I/O contract consumed by the Zato admin framework.
        input_required = ('name', 'host', 'port', 'queue_manager', 'channel', 'cache_open_send_queues', 'cache_open_receive_queues',
            'use_shared_connections', 'ssl', 'ssl_cipher_spec', 'ssl_key_repository', 'needs_mcd', 'max_chars_printed')
        output_required = ('id',)

    def success_message(self, item):
        # NOTE(review): self.verb is presumably supplied by the CreateEdit
        # base class ('created'/'updated') -- confirm against the framework.
        return 'Successfully {0} the JMS WebSphere MQ definition [{1}]'.format(self.verb, item.name)
class Create(_CreateEdit):
    """Creates a new JMS WebSphere MQ definition."""
    url_name = 'def-jms-wmq-create'
    service_name = 'zato.definition.jms-wmq.create'
class Edit(_CreateEdit):
    """Updates an existing JMS WebSphere MQ definition."""
    url_name = 'def-jms-wmq-edit'
    form_prefix = 'edit-'
    service_name = 'zato.definition.jms-wmq.edit'
class Delete(_Delete):
    """Deletes a JMS WebSphere MQ definition."""
    url_name = 'def-jms-wmq-delete'
    error_message = 'Could not delete the JMS WebSphere MQ definition'
    service_name = 'zato.definition.jms-wmq.delete'
| 0.008578 |
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.conf as conf
from azurelinuxagent.common.exception import ResourceDiskError
from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler
class FreeBSDResourceDiskHandler(ResourceDiskHandler):
    """
    This class handles resource disk mounting for FreeBSD.

    The resource disk locates at following slot:
    scbus2 on blkvsc1 bus 0:
    <Msft Virtual Disk 1.0> at scbus2 target 1 lun 0 (da1,pass2)

    There are 2 variations based on partition table type:
    1. MBR: The resource disk partition is /dev/da1s1
    2. GPT: The resource disk partition is /dev/da1p2, /dev/da1p1 is for reserved usage.
    """

    def __init__(self):
        super(FreeBSDResourceDiskHandler, self).__init__()

    @staticmethod
    def parse_gpart_list(data):
        """Parse `gpart list` output into {geom name: partition scheme}.

        NOTE(review): assumes every "scheme:" line is preceded by a
        "Geom name:" line; malformed input would raise NameError on
        geom_name -- confirm that gpart always emits them in this order.
        """
        dic = {}
        for line in data.split('\n'):
            if line.find("Geom name: ") != -1:
                geom_name = line[11:]
            elif line.find("scheme: ") != -1:
                dic[geom_name] = line[8:]
        return dic

    def mount_resource_disk(self, mount_point):
        """Detect, optionally format, and mount the Azure resource disk.

        Returns the mount point in use (the existing one if the partition
        was already mounted). Raises ResourceDiskError on any failure.
        Only the 'ufs' filesystem is supported.
        """
        fs = self.fs
        if fs != 'ufs':
            raise ResourceDiskError("Unsupported filesystem type:{0}, only ufs is supported.".format(fs))

        # 1. Detect device
        err, output = shellutil.run_get_output('gpart list')
        if err:
            raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output))
        disks = self.parse_gpart_list(output)

        # The resource disk is expected on IDE port 1; fall back to probing
        # the SCSI buses via camcontrol if that lookup fails.
        device = self.osutil.device_for_ide_port(1)
        if device is None or not device in disks:
            # fallback logic to find device
            err, output = shellutil.run_get_output('camcontrol periphlist 2:1:0')
            if err:
                # try again on "3:1:0"
                err, output = shellutil.run_get_output('camcontrol periphlist 3:1:0')
                if err:
                    raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output))

            # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n'
            for line in output.split('\n'):
                index = line.find(':')
                if index > 0:
                    geom_name = line[:index]
                    if geom_name in disks:
                        device = geom_name
                        break

        if not device:
            raise ResourceDiskError("Unable to detect resource disk device.")
        logger.info('Resource disk device {0} found.', device)

        # 2. Detect partition
        # MBR disks put the data on slice 1; GPT disks reserve partition 1
        # and put the data on partition 2 (see class docstring).
        partition_table_type = disks[device]
        if partition_table_type == 'MBR':
            provider_name = device + 's1'
        elif partition_table_type == 'GPT':
            provider_name = device + 'p2'
        else:
            raise ResourceDiskError("Unsupported partition table type:{0}".format(output))

        err, output = shellutil.run_get_output('gpart show -p {0}'.format(device))
        if err or output.find(provider_name) == -1:
            raise ResourceDiskError("Resource disk partition not found.")

        partition = '/dev/' + provider_name
        logger.info('Resource disk partition {0} found.', partition)

        # 3. Mount partition
        mount_list = shellutil.run_get_output("mount")[1]
        existing = self.osutil.get_mount_point(mount_list, partition)

        if existing:
            logger.info("Resource disk {0} is already mounted", partition)
            return existing

        fileutil.mkdir(mount_point, mode=0o755)
        mount_cmd = 'mount -t {0} {1} {2}'.format(fs, partition, mount_point)
        err = shellutil.run(mount_cmd, chk_err=False)
        if err:
            # A failed mount is treated as "no filesystem yet": create one
            # with soft updates (-U) and retry the mount once.
            logger.info('Creating {0} filesystem on partition {1}'.format(fs, partition))
            err, output = shellutil.run_get_output('newfs -U {0}'.format(partition))
            if err:
                raise ResourceDiskError("Failed to create new filesystem on partition {0}, error:{1}"
                    .format(partition, output))
            err, output = shellutil.run_get_output(mount_cmd, chk_err=False)
            if err:
                raise ResourceDiskError("Failed to mount partition {0}, error {1}".format(partition, output))
        logger.info("Resource disk partition {0} is mounted at {1} with fstype {2}", partition, mount_point, fs)
        return mount_point

    def create_swap_space(self, mount_point, size_mb):
        """Create (or reuse) a swap file of *size_mb* MB on the resource disk
        and enable it through a memory disk (md) device, optionally
        encrypted with geli when configured.
        """
        size_kb = size_mb * 1024
        size = size_kb * 1024
        swapfile = os.path.join(mount_point, 'swapfile')
        swaplist = shellutil.run_get_output("swapctl -l")[1]

        # Already active with the requested size: nothing to do.
        if swapfile in swaplist \
                and os.path.isfile(swapfile) \
                and os.path.getsize(swapfile) == size:
            logger.info("Swap already enabled")
            return

        # A stale swap file of the wrong size is discarded and recreated.
        if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size:
            logger.info("Remove old swap file")
            shellutil.run("swapoff -a", chk_err=False)
            os.remove(swapfile)

        if not os.path.isfile(swapfile):
            logger.info("Create swap file")
            self.mkfile(swapfile, size_kb * 1024)

        # Back the swap file with an md(4) vnode device; FreeBSD cannot
        # swap directly onto a file.
        mddevice = shellutil.run_get_output("mdconfig -a -t vnode -f {0}".format(swapfile))[1].rstrip()
        shellutil.run("chmod 0600 /dev/{0}".format(mddevice))

        if conf.get_resourcedisk_enable_swap_encryption():
            # Layer a one-time geli AES-XTS encryption device on top of
            # the md device before enabling swap.
            shellutil.run("kldload aesni")
            shellutil.run("kldload cryptodev")
            shellutil.run("kldload geom_eli")
            shellutil.run("geli onetime -e AES-XTS -l 256 -d /dev/{0}".format(mddevice))
            shellutil.run("chmod 0600 /dev/{0}.eli".format(mddevice))
            if shellutil.run("swapon /dev/{0}.eli".format(mddevice)):
                raise ResourceDiskError("/dev/{0}.eli".format(mddevice))
            logger.info("Enabled {0}KB of swap at /dev/{1}.eli ({2})".format(size_kb, mddevice, swapfile))
        else:
            if shellutil.run("swapon /dev/{0}".format(mddevice)):
                raise ResourceDiskError("/dev/{0}".format(mddevice))
            logger.info("Enabled {0}KB of swap at /dev/{1} ({2})".format(size_kb, mddevice, swapfile))
| 0.003254 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import gettext
import iso8601
import netaddr
from oslo_context import context
from oslo_utils import timeutils
import six
from ironic.common import exception
from ironic.objects import base
from ironic.objects import utils
from ironic.tests import base as test_base
# Install gettext's _() into builtins so the modules under test can use it.
gettext.install('ironic')
class MyObj(base.IronicObject):
    """Minimal IronicObject fixture used throughout this test module.

    Its remotable methods set well-known values ('bar', 'polo', 321,
    'refreshed', 42, 'meow', ...) that the tests below assert against.
    """
    VERSION = '1.5'

    fields = {'foo': int,
              'bar': str,
              'missing': str,
              }

    def obj_load_attr(self, attrname):
        # Lazy-loading hook: any unset attribute resolves to 'loaded!'.
        setattr(self, attrname, 'loaded!')

    @base.remotable_classmethod
    def query(cls, context):
        # Returns a "clean" object (no pending changes) with fixed values.
        obj = cls(context)
        obj.foo = 1
        obj.bar = 'bar'
        obj.obj_reset_changes()
        return obj

    @base.remotable
    def marco(self, context):
        return 'polo'

    @base.remotable
    def update_test(self, context):
        # Records which context was in effect via the value written to bar.
        if context.tenant == 'alternate':
            self.bar = 'alternate-context'
        else:
            self.bar = 'updated'

    @base.remotable
    def save(self, context):
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        self.foo = 321
        self.bar = 'refreshed'
        self.obj_reset_changes()

    @base.remotable
    def modify_save_modify(self, context):
        # Change, persist, then change again: leaves only foo dirty.
        self.bar = 'meow'
        self.save()
        self.foo = 42
class MyObj2(object):
    """A second class reporting the name 'MyObj' (not an IronicObject)."""

    @classmethod
    def obj_name(cls):
        return 'MyObj'

    @base.remotable_classmethod
    def get(cls, *args, **kwargs):
        pass
class TestSubclassedObject(MyObj):
    # Adds one field on top of MyObj; test_object_inheritance asserts the
    # subclass exposes the union of its own and MyObj's fields.
    fields = {'new_field': str}
class TestMetaclass(test_base.TestCase):
    """Tests for IronicObjectMetaclass class registration/tracking."""

    def test_obj_tracking(self):
        @six.add_metaclass(base.IronicObjectMetaclass)
        class NewBaseClass(object):
            fields = {}

            @classmethod
            def obj_name(cls):
                return cls.__name__

        class Test1(NewBaseClass):
            @staticmethod
            def obj_name():
                return 'fake1'

        class Test2(NewBaseClass):
            pass

        class Test2v2(NewBaseClass):
            @staticmethod
            def obj_name():
                return 'Test2'

        # Classes sharing an obj_name are grouped under one registry key.
        expected = {'fake1': [Test1], 'Test2': [Test2, Test2v2]}

        self.assertEqual(expected, NewBaseClass._obj_classes)
        # The following should work, also.
        self.assertEqual(expected, Test1._obj_classes)
        self.assertEqual(expected, Test2._obj_classes)
class TestUtils(test_base.TestCase):
    """Tests for the field coercion helpers in ironic.objects.utils."""

    def test_datetime_or_none(self):
        naive_dt = datetime.datetime.now()
        dt = timeutils.parse_isotime(timeutils.isotime(naive_dt))
        self.assertEqual(utils.datetime_or_none(dt), dt)
        # Naive datetimes are normalized to UTC with whole seconds.
        self.assertEqual(utils.datetime_or_none(dt),
                         naive_dt.replace(tzinfo=iso8601.iso8601.Utc(),
                                          microsecond=0))
        self.assertIsNone(utils.datetime_or_none(None))
        self.assertRaises(ValueError, utils.datetime_or_none, 'foo')

    def test_datetime_or_str_or_none(self):
        dts = timeutils.isotime()
        dt = timeutils.parse_isotime(dts)
        self.assertEqual(utils.datetime_or_str_or_none(dt), dt)
        self.assertIsNone(utils.datetime_or_str_or_none(None))
        self.assertEqual(utils.datetime_or_str_or_none(dts), dt)
        self.assertRaises(ValueError, utils.datetime_or_str_or_none, 'foo')

    def test_int_or_none(self):
        self.assertEqual(utils.int_or_none(1), 1)
        self.assertEqual(utils.int_or_none('1'), 1)
        self.assertIsNone(utils.int_or_none(None))
        self.assertRaises(ValueError, utils.int_or_none, 'foo')

    def test_str_or_none(self):
        class Obj(object):
            pass
        self.assertEqual(utils.str_or_none('foo'), 'foo')
        self.assertEqual(utils.str_or_none(1), '1')
        self.assertIsNone(utils.str_or_none(None))

    def test_ip_or_none(self):
        # ip_or_none(version) returns a converter for that IP version.
        ip4 = netaddr.IPAddress('1.2.3.4', 4)
        ip6 = netaddr.IPAddress('1::2', 6)
        self.assertEqual(utils.ip_or_none(4)('1.2.3.4'), ip4)
        self.assertEqual(utils.ip_or_none(6)('1::2'), ip6)
        self.assertIsNone(utils.ip_or_none(4)(None))
        self.assertIsNone(utils.ip_or_none(6)(None))
        self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(4), 'foo')
        self.assertRaises(netaddr.AddrFormatError, utils.ip_or_none(6), 'foo')

    def test_dt_serializer(self):
        class Obj(object):
            foo = utils.dt_serializer('bar')

        obj = Obj()
        obj.bar = timeutils.parse_isotime('1955-11-05T00:00:00Z')
        self.assertEqual('1955-11-05T00:00:00Z', obj.foo())
        obj.bar = None
        self.assertIsNone(obj.foo())
        obj.bar = 'foo'
        self.assertRaises(AttributeError, obj.foo)

    def test_dt_deserializer(self):
        dt = timeutils.parse_isotime('1955-11-05T00:00:00Z')
        self.assertEqual(utils.dt_deserializer(None, timeutils.isotime(dt)),
                         dt)
        self.assertIsNone(utils.dt_deserializer(None, None))
        self.assertRaises(ValueError, utils.dt_deserializer, None, 'foo')

    def test_obj_to_primitive_list(self):
        class MyList(base.ObjectListBase, base.IronicObject):
            pass
        mylist = MyList(self.context)
        mylist.objects = [1, 2, 3]
        self.assertEqual([1, 2, 3], base.obj_to_primitive(mylist))

    def test_obj_to_primitive_dict(self):
        myobj = MyObj(self.context)
        myobj.foo = 1
        myobj.bar = 'foo'
        self.assertEqual({'foo': 1, 'bar': 'foo'},
                         base.obj_to_primitive(myobj))

    def test_obj_to_primitive_recursive(self):
        class MyList(base.ObjectListBase, base.IronicObject):
            pass

        mylist = MyList(self.context)
        mylist.objects = [MyObj(self.context), MyObj(self.context)]
        for i, value in enumerate(mylist):
            value.foo = i
        self.assertEqual([{'foo': 0}, {'foo': 1}],
                         base.obj_to_primitive(mylist))
class _BaseTestCase(test_base.TestCase):
    """Common setup: tracks remote object calls made during a test."""

    def setUp(self):
        super(_BaseTestCase, self).setUp()
        self.remote_object_calls = list()
class _LocalTest(_BaseTestCase):
    """Runs the shared tests with no indirection API (all calls local)."""

    def setUp(self):
        super(_LocalTest, self).setUp()
        # Just in case
        base.IronicObject.indirection_api = None

    def assertRemotes(self):
        # Locally, no remote object calls should ever be recorded.
        self.assertEqual([], self.remote_object_calls)
@contextlib.contextmanager
def things_temporarily_local():
    """Temporarily disable the indirection API so remotable calls are
    handled directly (locally) by the conductor, restoring the previous
    API when the block exits.
    """
    # Temporarily go non-remote so the conductor handles
    # this request directly
    _api = base.IronicObject.indirection_api
    base.IronicObject.indirection_api = None
    try:
        yield
    finally:
        # Bug fix: restore the API even if the wrapped block raises;
        # previously an exception left indirection_api set to None.
        base.IronicObject.indirection_api = _api
class _TestObject(object):
    """Mixin containing the IronicObject behaviour test suite.

    Mixed into concrete TestCase subclasses (e.g. TestObject) so the same
    assertions can run under different dispatch configurations.
    """

    def test_hydration_type_error(self):
        primitive = {'ironic_object.name': 'MyObj',
                     'ironic_object.namespace': 'ironic',
                     'ironic_object.version': '1.5',
                     'ironic_object.data': {'foo': 'a'}}
        # 'a' is not coercible to the int declared for 'foo'.
        self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)

    def test_hydration(self):
        primitive = {'ironic_object.name': 'MyObj',
                     'ironic_object.namespace': 'ironic',
                     'ironic_object.version': '1.5',
                     'ironic_object.data': {'foo': 1}}
        obj = MyObj.obj_from_primitive(primitive)
        self.assertEqual(1, obj.foo)

    def test_hydration_bad_ns(self):
        primitive = {'ironic_object.name': 'MyObj',
                     'ironic_object.namespace': 'foo',
                     'ironic_object.version': '1.5',
                     'ironic_object.data': {'foo': 1}}
        self.assertRaises(exception.UnsupportedObjectError,
                          MyObj.obj_from_primitive, primitive)

    def test_dehydration(self):
        expected = {'ironic_object.name': 'MyObj',
                    'ironic_object.namespace': 'ironic',
                    'ironic_object.version': '1.5',
                    'ironic_object.data': {'foo': 1}}
        obj = MyObj(self.context)
        obj.foo = 1
        obj.obj_reset_changes()
        self.assertEqual(expected, obj.obj_to_primitive())

    def test_get_updates(self):
        obj = MyObj(self.context)
        self.assertEqual({}, obj.obj_get_changes())
        obj.foo = 123
        self.assertEqual({'foo': 123}, obj.obj_get_changes())
        obj.bar = 'test'
        self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
        obj.obj_reset_changes()
        self.assertEqual({}, obj.obj_get_changes())

    def test_object_property(self):
        obj = MyObj(self.context, foo=1)
        self.assertEqual(1, obj.foo)

    def test_object_property_type_error(self):
        obj = MyObj(self.context)

        def fail():
            obj.foo = 'a'
        self.assertRaises(ValueError, fail)

    def test_object_dict_syntax(self):
        # Objects also support dict-style access and iteration.
        obj = MyObj(self.context)
        obj.foo = 123
        obj.bar = 'bar'
        self.assertEqual(123, obj['foo'])
        self.assertEqual([('bar', 'bar'), ('foo', 123)],
                         sorted(obj.items(), key=lambda x: x[0]))
        self.assertEqual([('bar', 'bar'), ('foo', 123)],
                         sorted(list(obj.iteritems()), key=lambda x: x[0]))

    def test_load(self):
        # Unset attributes lazy-load via MyObj.obj_load_attr.
        obj = MyObj(self.context)
        self.assertEqual('loaded!', obj.bar)

    def test_load_in_base(self):
        class Foo(base.IronicObject):
            fields = {'foobar': int}
        obj = Foo(self.context)
        # NOTE(danms): Can't use assertRaisesRegexp() because of py26
        raised = False
        try:
            obj.foobar
        except NotImplementedError as ex:
            raised = True
        self.assertTrue(raised)
        self.assertTrue('foobar' in str(ex))

    def test_loaded_in_primitive(self):
        obj = MyObj(self.context)
        obj.foo = 1
        obj.obj_reset_changes()
        self.assertEqual('loaded!', obj.bar)
        # The lazy-loaded attribute counts as a change in the primitive.
        expected = {'ironic_object.name': 'MyObj',
                    'ironic_object.namespace': 'ironic',
                    'ironic_object.version': '1.5',
                    'ironic_object.changes': ['bar'],
                    'ironic_object.data': {'foo': 1,
                                           'bar': 'loaded!'}}
        self.assertEqual(expected, obj.obj_to_primitive())

    def test_changes_in_primitive(self):
        obj = MyObj(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        primitive = obj.obj_to_primitive()
        self.assertTrue('ironic_object.changes' in primitive)
        # Changes survive a serialize/deserialize round trip.
        obj2 = MyObj.obj_from_primitive(primitive)
        self.assertEqual(set(['foo']), obj2.obj_what_changed())
        obj2.obj_reset_changes()
        self.assertEqual(set(), obj2.obj_what_changed())

    def test_unknown_objtype(self):
        self.assertRaises(exception.UnsupportedObjectError,
                          base.IronicObject.obj_class_from_name, 'foo', '1.0')

    def test_with_alternate_context(self):
        # The context passed to the call, not the one the object was
        # created with, determines behavior (see MyObj.update_test).
        ctxt1 = context.RequestContext('foo', 'foo')
        ctxt2 = context.RequestContext('bar', tenant='alternate')
        obj = MyObj.query(ctxt1)
        obj.update_test(ctxt2)
        self.assertEqual('alternate-context', obj.bar)
        self.assertRemotes()

    def test_orphaned_object(self):
        obj = MyObj.query(self.context)
        obj._context = None
        self.assertRaises(exception.OrphanedObjectError,
                          obj.update_test)
        self.assertRemotes()

    def test_changed_1(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        obj.update_test(self.context)
        self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
        self.assertEqual(123, obj.foo)
        self.assertRemotes()

    def test_changed_2(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        obj.save()
        # save() resets the change tracking (see MyObj.save).
        self.assertEqual(set([]), obj.obj_what_changed())
        self.assertEqual(123, obj.foo)
        self.assertRemotes()

    def test_changed_3(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        obj.refresh()
        # refresh() overwrites local changes with the fixture values.
        self.assertEqual(set([]), obj.obj_what_changed())
        self.assertEqual(321, obj.foo)
        self.assertEqual('refreshed', obj.bar)
        self.assertRemotes()

    def test_changed_4(self):
        obj = MyObj.query(self.context)
        obj.bar = 'something'
        self.assertEqual(set(['bar']), obj.obj_what_changed())
        obj.modify_save_modify(self.context)
        # Only the post-save modification (foo) remains dirty.
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        self.assertEqual(42, obj.foo)
        self.assertEqual('meow', obj.bar)
        self.assertRemotes()

    def test_static_result(self):
        obj = MyObj.query(self.context)
        self.assertEqual('bar', obj.bar)
        result = obj.marco()
        self.assertEqual('polo', result)
        self.assertRemotes()

    def test_updates(self):
        obj = MyObj.query(self.context)
        self.assertEqual(1, obj.foo)
        obj.update_test()
        self.assertEqual('updated', obj.bar)
        self.assertRemotes()

    def test_base_attributes(self):
        # created_at/updated_at come from the IronicObject base class and
        # are serialized as ISO-8601 strings.
        dt = datetime.datetime(1955, 11, 5)
        obj = MyObj(self.context)
        obj.created_at = dt
        obj.updated_at = dt
        expected = {'ironic_object.name': 'MyObj',
                    'ironic_object.namespace': 'ironic',
                    'ironic_object.version': '1.5',
                    'ironic_object.changes':
                        ['created_at', 'updated_at'],
                    'ironic_object.data':
                        {'created_at': timeutils.isotime(dt),
                         'updated_at': timeutils.isotime(dt),
                         }
                    }
        actual = obj.obj_to_primitive()
        # ironic_object.changes is built from a set and order is undefined
        self.assertEqual(sorted(expected['ironic_object.changes']),
                         sorted(actual['ironic_object.changes']))
        del expected['ironic_object.changes'], actual['ironic_object.changes']
        self.assertEqual(expected, actual)

    def test_contains(self):
        obj = MyObj(self.context)
        self.assertFalse('foo' in obj)
        obj.foo = 1
        self.assertTrue('foo' in obj)
        self.assertFalse('does_not_exist' in obj)

    def test_obj_attr_is_set(self):
        obj = MyObj(self.context, foo=1)
        self.assertTrue(obj.obj_attr_is_set('foo'))
        self.assertFalse(obj.obj_attr_is_set('bar'))
        self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')

    def test_get(self):
        obj = MyObj(self.context, foo=1)
        # Foo has value, should not get the default
        self.assertEqual(obj.get('foo', 2), 1)
        # Foo has value, should return the value without error
        self.assertEqual(obj.get('foo'), 1)
        # Bar is not loaded, so we should get the default
        self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
        # Bar without a default should lazy-load
        self.assertEqual(obj.get('bar'), 'loaded!')
        # Bar now has a default, but loaded value should be returned
        self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
        # Invalid attribute should raise AttributeError
        self.assertRaises(AttributeError, obj.get, 'nothing')
        # ...even with a default
        self.assertRaises(AttributeError, obj.get, 'nothing', 3)

    def test_object_inheritance(self):
        # Subclasses expose the union of their own and inherited fields.
        base_fields = base.IronicObject.fields.keys()
        myobj_fields = ['foo', 'bar', 'missing'] + base_fields
        myobj3_fields = ['new_field']
        self.assertTrue(issubclass(TestSubclassedObject, MyObj))
        self.assertEqual(len(myobj_fields), len(MyObj.fields))
        self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
        self.assertEqual(len(myobj_fields) + len(myobj3_fields),
                         len(TestSubclassedObject.fields))
        self.assertEqual(set(myobj_fields) | set(myobj3_fields),
                         set(TestSubclassedObject.fields.keys()))

    def test_get_changes(self):
        obj = MyObj(self.context)
        self.assertEqual({}, obj.obj_get_changes())
        obj.foo = 123
        self.assertEqual({'foo': 123}, obj.obj_get_changes())
        obj.bar = 'test'
        self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
        obj.obj_reset_changes()
        self.assertEqual({}, obj.obj_get_changes())

    def test_obj_fields(self):
        # obj_extra_fields adds computed attributes to obj_fields.
        class TestObj(base.IronicObject):
            fields = {'foo': int}
            obj_extra_fields = ['bar']

            @property
            def bar(self):
                return 'this is bar'

        obj = TestObj(self.context)
        self.assertEqual(set(['created_at', 'updated_at', 'foo', 'bar']),
                         set(obj.obj_fields))

    def test_obj_constructor(self):
        obj = MyObj(self.context, foo=123, bar='abc')
        self.assertEqual(123, obj.foo)
        self.assertEqual('abc', obj.bar)
        self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
class TestObject(_LocalTest, _TestObject):
    """Runs the shared _TestObject suite with local (non-remote) dispatch."""
    pass
class TestObjectListBase(test_base.TestCase):
    """Tests for ObjectListBase: list protocol, serialization, versioning."""

    def test_list_like_operations(self):
        class Foo(base.ObjectListBase, base.IronicObject):
            pass

        objlist = Foo(self.context)
        objlist._context = 'foo'
        objlist.objects = [1, 2, 3]
        self.assertEqual(list(objlist), objlist.objects)
        self.assertEqual(3, len(objlist))
        self.assertIn(2, objlist)
        self.assertEqual([1], list(objlist[:1]))
        # Slicing preserves the list's context.
        self.assertEqual('foo', objlist[:1]._context)
        self.assertEqual(3, objlist[2])
        self.assertEqual(1, objlist.count(1))
        self.assertEqual(1, objlist.index(2))

    def test_serialization(self):
        class Foo(base.ObjectListBase, base.IronicObject):
            pass

        class Bar(base.IronicObject):
            fields = {'foo': str}

        obj = Foo(self.context)
        obj.objects = []
        for i in 'abc':
            bar = Bar(self.context)
            bar.foo = i
            obj.objects.append(bar)

        obj2 = base.IronicObject.obj_from_primitive(obj.obj_to_primitive())
        self.assertFalse(obj is obj2)
        self.assertEqual([x.foo for x in obj],
                         [y.foo for y in obj2])

    def _test_object_list_version_mappings(self, list_obj_class):
        # Figure out what sort of object this list is for
        list_field = list_obj_class.fields['objects']
        item_obj_field = list_field._type._element_type
        item_obj_name = item_obj_field._type._obj_name

        # Look through all object classes of this type and make sure that
        # the versions we find are covered by the parent list class
        for item_class in base.IronicObject._obj_classes[item_obj_name]:
            self.assertIn(
                item_class.VERSION,
                list_obj_class.child_versions.values())

    def test_object_version_mappings(self):
        # Find all object list classes and make sure that they at least handle
        # all the current object versions
        for obj_classes in base.IronicObject._obj_classes.values():
            for obj_class in obj_classes:
                if issubclass(obj_class, base.ObjectListBase):
                    self._test_object_list_version_mappings(obj_class)

    def test_list_changes(self):
        class Foo(base.ObjectListBase, base.IronicObject):
            pass

        class Bar(base.IronicObject):
            fields = {'foo': str}

        obj = Foo(self.context, objects=[])
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.objects.append(Bar(self.context, foo='test'))
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.obj_reset_changes()
        # This should still look dirty because the child is dirty
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.objects[0].obj_reset_changes()
        # This should now look clean because the child is clean
        self.assertEqual(set(), obj.obj_what_changed())
class TestObjectSerializer(test_base.TestCase):
    """Tests for IronicObjectSerializer round-tripping of entities."""

    def test_serialize_entity_primitive(self):
        serializer = base.IronicObjectSerializer()
        # Plain primitives pass through serialization untouched.
        for value in (1, 'foo', [1, 2], {'foo': 'bar'}):
            self.assertEqual(value, serializer.serialize_entity(None, value))

    def test_deserialize_entity_primitive(self):
        serializer = base.IronicObjectSerializer()
        # Plain primitives pass through deserialization untouched.
        for value in (1, 'foo', [1, 2], {'foo': 'bar'}):
            self.assertEqual(value, serializer.deserialize_entity(None, value))

    def test_object_serialization(self):
        serializer = base.IronicObjectSerializer()
        obj = MyObj(self.context)
        primitive = serializer.serialize_entity(self.context, obj)
        self.assertTrue('ironic_object.name' in primitive)
        restored = serializer.deserialize_entity(self.context, primitive)
        self.assertIsInstance(restored, MyObj)
        # The deserialized object picks up the supplied context.
        self.assertEqual(self.context, restored._context)

    def test_object_serialization_iterables(self):
        serializer = base.IronicObjectSerializer()
        obj = MyObj(self.context)
        # Lists, tuples and sets of objects are serialized element-wise.
        for container_type in (list, tuple, set):
            container = container_type([obj])
            primitive = serializer.serialize_entity(self.context, container)
            self.assertEqual(1, len(primitive))
            for item in primitive:
                self.assertFalse(isinstance(item, base.IronicObject))
            restored = serializer.deserialize_entity(self.context, primitive)
            self.assertEqual(1, len(restored))
            for item in restored:
                self.assertIsInstance(item, MyObj)
| 0.000044 |
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
# ansible-doc documentation (YAML embedded in a string; do not edit the
# YAML structure without validating it).
DOCUMENTATION = '''
---
module: os_security_group_rule
short_description: Add/Delete rule from an existing security group
extends_documentation_fragment: openstack
version_added: "2.0"
description:
   - Add or Remove rule from an existing security group
options:
   security_group:
     description:
        - Name or ID of the security group
     required: true
   protocol:
      description:
        - IP protocols TCP UDP ICMP 112 (VRRP)
      choices: ['tcp', 'udp', 'icmp', '112', None]
      default: None
   port_range_min:
      description:
        - Starting port
      required: false
      default: None
   port_range_max:
      description:
        - Ending port
      required: false
      default: None
   remote_ip_prefix:
      description:
        - Source IP address(es) in CIDR notation (exclusive with remote_group)
      required: false
   remote_group:
      description:
        - Name or ID of the Security group to link (exclusive with
          remote_ip_prefix)
      required: false
   ethertype:
      description:
        - Must be IPv4 or IPv6, and addresses represented in CIDR must
          match the ingress or egress rules. Not all providers support IPv6.
      choices: ['IPv4', 'IPv6']
      default: IPv4
   direction:
     description:
        - The direction in which the security group rule is applied. Not
          all providers support egress.
     choices: ['egress', 'ingress']
     default: ingress
   state:
     description:
       - Should the resource be present or absent.
     choices: [present, absent]
     default: present
requirements: ["shade"]
'''
# Usage examples rendered by ansible-doc.
EXAMPLES = '''
# Create a security group rule
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: tcp
    port_range_min: 80
    port_range_max: 80
    remote_ip_prefix: 0.0.0.0/0
# Create a security group rule for ping
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: icmp
    remote_ip_prefix: 0.0.0.0/0
# Another way to create the ping rule
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: icmp
    port_range_min: -1
    port_range_max: -1
    remote_ip_prefix: 0.0.0.0/0
# Create a TCP rule covering all ports
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: tcp
    port_range_min: 1
    port_range_max: 65535
    remote_ip_prefix: 0.0.0.0/0
# Another way to create the TCP rule above (defaults to all ports)
- os_security_group_rule:
    cloud: mordred
    security_group: foo
    protocol: tcp
    remote_ip_prefix: 0.0.0.0/0
# Create a rule for VRRP with numbered protocol 112
- os_security_group_rule:
    security_group: loadbalancer_sg
    protocol: 112
    remote_group: loadbalancer-node_sg
'''
# Return-value documentation for the module's JSON result.
RETURN = '''
id:
  description: Unique rule UUID.
  type: string
direction:
  description: The direction in which the security group rule is applied.
  type: string
  sample: 'egress'
ethertype:
  description: One of IPv4 or IPv6.
  type: string
  sample: 'IPv4'
port_range_min:
  description: The minimum port number in the range that is matched by
               the security group rule.
  type: int
  sample: 8000
port_range_max:
  description: The maximum port number in the range that is matched by
               the security group rule.
  type: int
  sample: 8000
protocol:
  description: The protocol that is matched by the security group rule.
  type: string
  sample: 'tcp'
remote_ip_prefix:
  description: The remote IP prefix to be associated with this security group rule.
  type: string
  sample: '0.0.0.0/0'
security_group_id:
  description: The security group ID to associate with this security group rule.
  type: string
'''
# shade is an optional dependency; record its availability so main() can
# fail with a clear message instead of an ImportError at import time.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False
def _ports_match(protocol, module_min, module_max, rule_min, rule_max):
"""
Capture the complex port matching logic.
The port values coming in for the module might be -1 (for ICMP),
which will work only for Nova, but this is handled by shade. Likewise,
they might be None, which works for Neutron, but not Nova. This too is
handled by shade. Since shade will consistently return these port
values as None, we need to convert any -1 values input to the module
to None here for comparison.
For TCP and UDP protocols, None values for both min and max are
represented as the range 1-65535 for Nova, but remain None for
Neutron. Shade returns the full range when Nova is the backend (since
that is how Nova stores them), and None values for Neutron. If None
values are input to the module for both values, then we need to adjust
for comparison.
"""
# Check if the user is supplying -1 for ICMP.
if protocol == 'icmp':
if module_min and int(module_min) == -1:
module_min = None
if module_max and int(module_max) == -1:
module_max = None
# Check if user is supplying None values for full TCP/UDP port range.
if protocol in ['tcp', 'udp'] and module_min is None and module_max is None:
if (rule_min and int(rule_min) == 1
and rule_max and int(rule_max) == 65535):
# (None, None) == (1, 65535)
return True
# Sanity check to make sure we don't have type comparison issues.
if module_min:
module_min = int(module_min)
if module_max:
module_max = int(module_max)
if rule_min:
rule_min = int(rule_min)
if rule_max:
rule_max = int(rule_max)
return module_min == rule_min and module_max == rule_max
def _find_matching_rule(module, secgroup, remotegroup):
    """Find a rule in the security group matching the module parameters.

    :param module: the AnsibleModule carrying the requested rule params.
    :param secgroup: security group dict with a 'security_group_rules' list.
    :param remotegroup: dict whose 'id' is the remote group to match
        (``{'id': None}`` when no remote group was requested).
    :returns: the first matching rule dict, or None if none matches.
    """
    wanted = module.params
    remote_group_id = remotegroup['id']
    for rule in secgroup['security_group_rules']:
        if (wanted['protocol'] == rule['protocol']
                and wanted['remote_ip_prefix'] == rule['remote_ip_prefix']
                and wanted['ethertype'] == rule['ethertype']
                and wanted['direction'] == rule['direction']
                and remote_group_id == rule['remote_group_id']
                and _ports_match(wanted['protocol'],
                                 wanted['port_range_min'],
                                 wanted['port_range_max'],
                                 rule['port_range_min'],
                                 rule['port_range_max'])):
            return rule
    return None
def _system_state_change(module, secgroup, remotegroup):
state = module.params['state']
if secgroup:
rule_exists = _find_matching_rule(module, secgroup, remotegroup)
else:
return False
if state == 'present' and not rule_exists:
return True
if state == 'absent' and rule_exists:
return True
return False
def main():
    """Ansible entry point: ensure a security group rule exists or not."""
    argument_spec = openstack_full_argument_spec(
        security_group = dict(required=True),
        # NOTE(Shrews): None is an acceptable protocol value for
        # Neutron, but Nova will balk at this.
        protocol = dict(default=None,
                        choices=[None, 'tcp', 'udp', 'icmp', '112']),
        port_range_min = dict(required=False, type='int'),
        port_range_max = dict(required=False, type='int'),
        remote_ip_prefix = dict(required=False, default=None),
        remote_group = dict(required=False, default=None),
        ethertype = dict(default='IPv4',
                         choices=['IPv4', 'IPv6']),
        direction = dict(default='ingress',
                         choices=['egress', 'ingress']),
        state = dict(default='present',
                     choices=['absent', 'present']),
    )
    # remote_ip_prefix and remote_group are alternative ways to specify
    # the rule's remote side; Ansible rejects supplying both.
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['remote_ip_prefix', 'remote_group'],
        ]
    )
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    state = module.params['state']
    security_group = module.params['security_group']
    remote_group = module.params['remote_group']
    changed = False
    try:
        cloud = shade.openstack_cloud(**module.params)
        secgroup = cloud.get_security_group(security_group)
        if remote_group:
            remotegroup = cloud.get_security_group(remote_group)
        else:
            # No remote group requested: match rules with no remote_group_id.
            remotegroup = { 'id' : None }
        # In check mode, report whether a change would occur without
        # touching the cloud.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, secgroup, remotegroup))
        if state == 'present':
            if not secgroup:
                module.fail_json(msg='Could not find security group %s' %
                                 security_group)
            # Creating an already-existing rule would fail, so look for a
            # matching one first (idempotency).
            rule = _find_matching_rule(module, secgroup, remotegroup)
            if not rule:
                rule = cloud.create_security_group_rule(
                    secgroup['id'],
                    port_range_min=module.params['port_range_min'],
                    port_range_max=module.params['port_range_max'],
                    protocol=module.params['protocol'],
                    remote_ip_prefix=module.params['remote_ip_prefix'],
                    remote_group_id=remotegroup['id'],
                    direction=module.params['direction'],
                    ethertype=module.params['ethertype']
                )
                changed = True
            module.exit_json(changed=changed, rule=rule, id=rule['id'])
        # Absent: deleting from a nonexistent group is a no-op, not an error.
        if state == 'absent' and secgroup:
            rule = _find_matching_rule(module, secgroup, remotegroup)
            if rule:
                cloud.delete_security_group_rule(rule['id'])
                changed = True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
# NOTE(review): Ansible rewrites these wildcard imports at runtime to
# inject the module boilerplate (AnsibleModule, the openstack_* helpers).
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
# Entry point when Ansible executes the module as a script.
if __name__ == '__main__':
    main()
| 0.003196 |
#!/usr/bin/python
import random
def makeTerrainData(n_points=1000):
    """Build a deterministic toy "terrain" classification dataset.

    Each sample has two features -- terrain grade (steepness) and
    bumpiness, both uniform in [0, 1) -- and a binary label
    (1.0 = slow, 0.0 = fast).  The label is ``grade * bumpy`` plus noise,
    rounded to 0/1, and any terrain that is very steep or very bumpy
    (> 0.8) is forced to slow.

    :param n_points: total number of samples to generate.
    :returns: tuple (X_train, y_train, X_test, y_test) with a 75/25 split.
    """
    # Fixed seed so every call yields the same dataset.  The three lists
    # are drawn in this exact order to preserve the historical output.
    random.seed(42)
    grade = [random.random() for _ in range(n_points)]
    bumpy = [random.random() for _ in range(n_points)]
    error = [random.random() for _ in range(n_points)]
    # float(round(...)) keeps labels as 0.0/1.0 floats, matching the
    # Python 2 behaviour of round() that this code was written against.
    y = [float(round(grade[i] * bumpy[i] + 0.3 + 0.1 * error[i]))
         for i in range(n_points)]
    for i in range(n_points):
        # Very steep or very bumpy terrain is always "slow".
        if grade[i] > 0.8 or bumpy[i] > 0.8:
            y[i] = 1.0

    # Split into train/test sets (75% / 25%).
    X = [[gg, ss] for gg, ss in zip(grade, bumpy)]
    split = int(0.75 * n_points)
    X_train, X_test = X[:split], X[split:]
    y_train, y_test = y[:split], y[split:]
    # NOTE: the previous version also built per-class feature dicts and a
    # Python-2-only debug print ('print "return"'); both were unused and
    # the print was a syntax error under Python 3, so they were removed.
    return X_train, y_train, X_test, y_test
| 0.026689 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sources and sinks.
A Source manages record-oriented data input from a particular kind of source
(e.g. a set of files, a database table, etc.). The reader() method of a source
returns a reader object supporting the iterator protocol; iteration yields
raw records of unprocessed, serialized data.
A Sink manages record-oriented data output to a particular kind of sink
(e.g. a set of files, a database table, etc.). The writer() method of a sink
returns a writer object supporting writing records of serialized data to
the sink.
"""
from collections import namedtuple
import logging
import random
import uuid
from apache_beam import pvalue
from apache_beam import coders
from apache_beam.pvalue import AsIter
from apache_beam.pvalue import AsSingleton
from apache_beam.transforms import core
from apache_beam.transforms import ptransform
from apache_beam.transforms import window
from apache_beam.transforms.display import HasDisplayData
from apache_beam.transforms.display import DisplayDataItem
# Encapsulates information about a bundle of a source generated when method
# BoundedSource.split() is invoked.
# This is a named 4-tuple that has following fields.
# * weight - a number that represents the size of the bundle. This value will
# be used to compare the relative sizes of bundles generated by the
# current source.
# The weight returned here could be specified using a unit of your
# choice (for example, bundles of sizes 100MB, 200MB, and 700MB may
# specify weights 100, 200, 700 or 1, 2, 7) but all bundles of a
# source should specify the weight using the same unit.
# * source - a BoundedSource object for the bundle.
# * start_position - starting position of the bundle
# * stop_position - ending position of the bundle.
#
# Type for start and stop positions are specific to the bounded source and must
# be consistent throughout.
SourceBundle = namedtuple(
    'SourceBundle',
    ['weight', 'source', 'start_position', 'stop_position'])
class BoundedSource(HasDisplayData):
    """A source that reads a finite amount of input records.

    Subclasses describe how to read a bounded dataset efficiently by
    implementing:

    * ``estimate_size()`` -- approximate total size of the data in bytes.
    * ``split()`` -- partition the source into weighted sub-sources
      (bundles) of roughly a desired byte size.
    * ``get_range_tracker()`` -- create a ``RangeTracker`` for a position
      range, used to coordinate reading and dynamic work rebalancing.
    * ``read()`` -- iterate over the records within the boundaries of a
      given ``RangeTracker``.

    A runner reads the source in two steps: it first calls
    ``get_range_tracker()`` with the start/end positions it intends to
    read (``None`` meaning the source's default boundary, which every
    source must define), then calls ``read()`` with the resulting
    tracker.

    **Mutability**

    A ``BoundedSource`` object should not be mutated while its methods
    (for example, ``read()``) are being invoked by a runner.  Runners may
    invoke these methods through multi-threaded and/or reentrant
    execution modes.
    """

    def estimate_size(self):
        """Estimates the size of the source in bytes.

        The estimate refers to external storage size, before any
        decompression or other processing is applied.

        Returns:
          the estimated size of the source, or ``None`` if it cannot be
          determined.
        """
        raise NotImplementedError

    def split(self, desired_bundle_size, start_position=None, stop_position=None):
        """Splits the source into a set of bundles.

        Bundles should be approximately ``desired_bundle_size`` bytes.

        Args:
          desired_bundle_size: the desired size (in bytes) of each bundle.
          start_position: if specified, the first bundle must start at
            this position.
          stop_position: if specified, the last bundle must end at this
            position.

        Returns:
          an iterator of ``SourceBundle`` objects describing the bundles.
        """
        raise NotImplementedError

    def get_range_tracker(self, start_position, stop_position):
        """Returns a RangeTracker for the given position range.

        The framework may invoke ``read()`` with the tracker returned
        here to read data from the source.

        Args:
          start_position: start of the range; if ``None``, the source's
            default start position must be used.
          stop_position: end of the range; if ``None``, the source's
            default stop position must be used.

        Returns:
          a ``RangeTracker`` for the given position range.
        """
        raise NotImplementedError

    def read(self, range_tracker):
        """Returns an iterator that reads data from the source.

        The records produced must respect the boundaries of the given
        ``RangeTracker``:

        * Records must belong to the range
          ``[range_tracker.start_position, range_tracker.stop_position)``
          (though a source may decide to return records that start after
          the stop position -- see the ``RangeTracker`` documentation).
          Note that ``range_tracker.stop_position`` may move earlier at
          any time due to successful ``range_tracker.try_split()`` calls.
        * ``range_tracker.try_split()`` must be invoked for every record
          that starts at a split point.
        * ``range_tracker.set_current_position()`` may be invoked for
          records that do not start at split points.

        Args:
          range_tracker: a non-``None`` ``RangeTracker`` whose
            boundaries must be respected when reading.

        Returns:
          an iterator of data read by the source.
        """
        raise NotImplementedError

    def default_output_coder(self):
        """Coder to use for the records returned by this source.

        Override in sources whose objects can be encoded more
        efficiently than by pickling.
        """
        return coders.registry.get_coder(object)
class RangeTracker(object):
    """A thread-safe helper used by the source framework.

    A source is defined by a ``BoundedSource``/``RangeTracker`` pair; the
    framework uses the tracker to perform dynamic work rebalancing of
    position-based sources.

    **Position-based sources**

    A position-based source can be described by a range of positions of
    an ordered type, and each record it produces can be described by a
    position of the same type -- most importantly, the position where the
    record *starts*.  The source class is free to define the semantics of
    positions, but the definitions must make splitting the source into
    parts -- including dynamic splitting -- correct, and reading any
    sub-range must be *efficient*: reading ``[A, B)`` must not require
    reading all data before ``A``.

    **Properties of position-based sources**

    The main requirement is *associativity*: reading ``[A, B)`` followed
    by ``[B, C)`` must produce the same records as reading ``[A, C)``,
    for ``A <= B <= C``.  This guarantees that however the full range is
    partitioned into sub-ranges, the total record set is unchanged.

    When every record has a unique starting position:

    * All records returned for ``[A, B)`` start within that range.
    * All but the last record end within the range; the last may or may
      not extend past ``B``.
    * Records do not overlap.

    Such sources define "read ``[A, B)``" as "read from the first record
    starting at or after ``A``, up to but not including the first record
    starting at or after ``B``" (e.g. lines of a text file, keys and
    values of a BigTable).  In all cases, every record returned for
    ``[A, B)`` starts at or after ``A``.

    **Split points**

    Some formats contain records that are not directly addressable --
    e.g. a sequence of compressed blocks, where records inside a block
    cannot be reached without decompressing it.  Such formats can still
    be associative ("read all records in blocks whose starting offset is
    in ``[A, B)``").  A record is a *split point* if there exists a
    position ``A`` such that it is the first record returned when reading
    ``[A, infinity)`` -- for compressed blocks, the first record of each
    block.  Split points make positions well-defined in every case:

    * A split-point record's position is the largest such ``A``; split
      point positions must be unique.
    * Positions of other records need only be non-decreasing.
    * Reading ``[A, B)`` returns records from the first split point at
      or after ``A`` up to, but not including, the first split point at
      or after ``B``; in particular, the first record returned must
      always be a split point.

    Consequently any partition of the full range reads the full record
    set, each record exactly once.

    **Consumed positions**

    As records are read and handed downstream, positions up to and
    including each record's start become *consumed*.  Dynamic splitting
    can only happen at *unconsumed* positions: after returning the record
    at offset 42, a split may occur only at offset 43 or beyond,
    otherwise that record could be read twice.
    """

    def start_position(self):
        """Returns the starting position of the current range, inclusive."""
        raise NotImplementedError(type(self))

    def stop_position(self):
        """Returns the ending position of the current range, exclusive."""
        raise NotImplementedError(type(self))

    def try_claim(self, position):  # pylint: disable=unused-argument
        """Atomically determines if a split-point record is in range.

        This method should be called **if and only if** the record starts
        at a split point.  It may update the tracker's last-consumed
        position to ``position``.

        ** Thread safety **

        This method (like several others on this class) may be invoked by
        multiple threads and must be thread-safe, e.g. by using a single
        lock object.

        Args:
          position: starting position of a record being read.

        Returns:
          ``True`` if the position falls within the current range,
          ``False`` otherwise.
        """
        raise NotImplementedError

    def set_current_position(self, position):
        """Updates the last-consumed position to the given position.

        A source may invoke this for records that do *not* start at split
        points; records that do must go through ``try_claim()`` instead.
        May modify the tracker's internal state.

        Args:
          position: starting position of a record being read.
        """
        raise NotImplementedError

    def position_at_fraction(self, fraction):
        """Returns the position at the given fraction of the range.

        Maps a fraction in ``[0.0, 1.0)`` onto the position range
        ``[self.start_position, self.stop_position)``.

        ** Thread safety **

        May be invoked by multiple threads; must be thread-safe, e.g. by
        using a single lock object.

        Args:
          fraction: a float value within ``[0.0, 1.0)``.

        Returns:
          a position within ``[self.start_position, self.stop_position)``.
        """
        raise NotImplementedError

    def try_split(self, position):
        """Atomically splits the current range.

        Chooses a ``split_position`` based on the suggested position (in
        most cases the same value) and splits the current range
        ``[self.start_position, self.stop_position)`` into a "primary"
        part ``[self.start_position, split_position)`` and a "residual"
        part ``[split_position, self.stop_position)``, provided the
        last-consumed position is still within the primary part (i.e.
        ``split_position`` has not been consumed yet).

        On success the current range becomes the primary part.

        ** Thread safety **

        May be invoked by multiple threads; must be thread-safe, e.g. by
        using a single lock object.

        Args:
          position: suggested position at which to split the range.

        Returns:
          a ``(split_position, split_fraction)`` tuple on success, where
          ``split_fraction`` is the size of
          ``[self.start_position, split_position)`` relative to the
          original (pre-split) range; ``None`` if ``split_position`` has
          already been consumed.
        """
        raise NotImplementedError

    def fraction_consumed(self):
        """Returns the approximate fraction of consumed positions.

        ** Thread safety **

        May be invoked by multiple threads; must be thread-safe, e.g. by
        using a single lock object.

        Returns:
          the approximate fraction of positions consumed by successful
          ``try_split()`` and position-reporting calls, or 0.0 if no such
          calls have happened.
        """
        raise NotImplementedError
class Sink(HasDisplayData):
    """A resource that can be written to using the ``df.io.Write`` transform.

    Here ``df`` stands for Dataflow Python code imported in following manner.
    ``import apache_beam as beam``.

    A parallel write to an ``iobase.Sink`` has three phases:

    1. a sequential *initialization* phase (e.g. creating a temporary
       output directory) -- ``initialize_write()``;
    2. a parallel write phase where workers write *bundles* of records
       through ``iobase.Writer`` objects obtained from ``open_writer()``;
    3. a sequential *finalization* phase (e.g. committing the writes,
       merging output files) -- ``finalize_write()``.

    For the exact definition of a Dataflow bundle see
    https://cloud.google.com/dataflow/faq.

    ``initialize_write()`` may return an ``init_result`` holding
    sink-wide state (for example a dynamically generated unique output
    directory).  For each bundle the execution engine calls
    ``open_writer(init_result, bundle_id)`` with a bundle id unique per
    invocation; the writer's ``close()`` returns a ``write_result`` that
    should encode the bundle's output (e.g. a temporary file name).  After
    all bundles complete, the engine calls
    ``finalize_write(init_result, write_results)`` with the results of the
    successful bundle writes only.

    Pseudo code for the whole transform (the outer loop runs in parallel
    across many machines)::

      init_result = sink.initialize_write()
      write_results = []
      for bundle in partition(pcoll):
        writer = sink.open_writer(init_result, generate_bundle_id())
        for elem in bundle:
          writer.write(elem)
        write_results.append(writer.close())
      sink.finalize_write(init_result, write_results)

    Because bundles may be retried or executed redundantly,
    ``initialize_write()`` and ``finalize_write()`` must be *idempotent*,
    and writers should embed their unique bundle id in whatever they
    produce so outputs of retried bundles cannot collide.  Both
    ``init_result`` and ``write_result`` values must be picklable with the
    Dataflow coder ``coders.PickleCoder``; returning them is optional, but
    if unique ``write_result`` objects are not returned the sink must
    guarantee idempotency when the same bundle is written multiple times.

    See also ``df.io.fileio.FileSink``, a simpler API for file sinks, and
    ``https://cloud.google.com/dataflow/model/custom-io#creating-sinks``
    for the full contract.
    """

    def initialize_write(self):
        """Performs any sink-wide setup before writing begins.

        Invoked before any data is written to the sink.  Please see
        documentation in ``iobase.Sink`` for an example.

        Returns:
          An object with any sink-specific state produced by
          initialization; it is later passed to ``open_writer()`` and
          ``finalize_write()``.
        """
        raise NotImplementedError

    def open_writer(self, init_result, uid):
        """Creates a writer for one bundle of elements.

        Args:
          init_result: the result of initialize_write() invocation.
          uid: a unique identifier generated by the system.

        Returns:
          an ``iobase.Writer`` that can be used to write a bundle of
          records to the current sink.
        """
        raise NotImplementedError

    def finalize_write(self, init_result, writer_results):
        """Finalizes the sink after all data is written to it.

        Called once after all bundle writes complete.  Only results of
        successful bundle writes are passed in -- exactly one per bundle,
        even if bundles were retried.  Implementations must locate and
        clean up any temporary/partial output of failed or redundantly
        retried bundles themselves, and should be atomic, or at least
        idempotent, since finalize may run multiple times on
        failure/retry.  If every retry of some bundle fails, the pipeline
        fails *without* finalize_write() being invoked.  Iteration order
        of ``writer_results`` is not guaranteed to be stable across calls.

        Args:
          init_result: the result of ``initialize_write()`` invocation.
          writer_results: an iterable of ``Writer.close()`` results, one
            per successfully written bundle.
        """
        raise NotImplementedError
class Writer(object):
    """Writes one bundle of a ``PCollection`` to a sink.

    ``write()`` is invoked once per element of the bundle and ``close()``
    once after the whole bundle has been written.  See ``iobase.Sink`` for
    a detailed description of the write protocol.
    """

    def write(self, value):
        """Writes a single value to the sink using the current writer."""
        raise NotImplementedError

    def close(self):
        """Closes the current writer.

        Please see documentation in ``iobase.Sink`` for an example.

        Returns:
          An object representing the writes performed by this writer.
        """
        raise NotImplementedError
class Read(ptransform.PTransform):
    """A transform that reads a PCollection."""

    def __init__(self, source):
        """Initializes a Read transform.

        Args:
          source: Data source to read from.
        """
        super(Read, self).__init__()
        self.source = source

    def expand(self, pbegin):
        assert isinstance(pbegin, pvalue.PBegin)
        self.pipeline = pbegin.pipeline
        return pvalue.PCollection(self.pipeline)

    def get_windowing(self, unused_inputs):
        # Sources always produce globally-windowed output.
        return core.Windowing(window.GlobalWindows())

    def _infer_output_coder(self, input_type=None, input_coder=None):
        # New-style sources expose default_output_coder(); native sources
        # carry an explicit coder attribute.
        if isinstance(self.source, BoundedSource):
            return self.source.default_output_coder()
        return self.source.coder

    def display_data(self):
        return {'source': DisplayDataItem(self.source.__class__,
                                          label='Read Source'),
                'source_dd': self.source}
class Write(ptransform.PTransform):
    """A ``PTransform`` that writes to a sink.

    A sink should inherit ``iobase.Sink``. Such implementations are
    handled using a composite transform that consists of three ``ParDo``s -
    (1) a ``ParDo`` performing a global initialization (2) a ``ParDo`` performing
    a parallel write and (3) a ``ParDo`` performing a global finalization. In the
    case of an empty ``PCollection``, only the global initialization and
    finalization will be performed. Currently only batch workflows support custom
    sinks.

    Example usage::

        pcollection | beam.io.Write(MySink())

    This returns a ``pvalue.PValue`` object that represents the end of the
    Pipeline.

    The sink argument may also be a full PTransform, in which case it will be
    applied directly. This allows composite sink-like transforms (e.g. a sink
    with some pre-processing DoFns) to be used the same as all other sinks.

    This transform also supports sinks that inherit ``iobase.NativeSink``. These
    are sinks that are implemented natively by the Dataflow service and hence
    should not be updated by users. These sinks are processed using a Dataflow
    native write transform.
    """

    def __init__(self, sink):
        """Initializes a Write transform.

        Args:
          sink: Data sink to write to.
        """
        super(Write, self).__init__()
        self.sink = sink

    def display_data(self):
        return {'sink': self.sink.__class__,
                'sink_dd': self.sink}

    def expand(self, pcoll):
        # Imported here to avoid a circular import at module load time.
        from apache_beam.runners.google_cloud_dataflow.native_io import iobase as dataflow_io
        if isinstance(self.sink, dataflow_io.NativeSink):
            # A native sink
            return pcoll | 'NativeWrite' >> dataflow_io._NativeWrite(self.sink)
        elif isinstance(self.sink, Sink):
            # A custom sink
            return pcoll | WriteImpl(self.sink)
        elif isinstance(self.sink, ptransform.PTransform):
            # This allows "composite" sinks to be used like non-composite ones.
            return pcoll | self.sink
        else:
            # Bug fix: the message was previously passed as a *second*
            # ValueError argument ("... %r", self.sink), so %r was never
            # interpolated.  Format it into the message instead.
            raise ValueError('A sink must inherit iobase.Sink, iobase.NativeSink, '
                             'or be a PTransform. Received : %r' % (self.sink,))
class WriteImpl(ptransform.PTransform):
    """Implements the writing of custom sinks."""

    def __init__(self, sink):
        super(WriteImpl, self).__init__()
        self.sink = sink

    def expand(self, pcoll):
        # Single-element collection used to trigger the one-time
        # initialize and finalize steps exactly once.
        do_once = pcoll.pipeline | 'DoOnce' >> core.Create([None])
        init_result_coll = do_once | 'InitializeWrite' >> core.Map(
            lambda _, sink: sink.initialize_write(), self.sink)
        if getattr(self.sink, 'num_shards', 0):
            # Fixed sharding requested by the sink: assign each element to
            # one of num_shards keys, group by key, and write each group as
            # one bundle.
            min_shards = self.sink.num_shards
            if min_shards == 1:
                keyed_pcoll = pcoll | core.Map(lambda x: (None, x))
            else:
                keyed_pcoll = pcoll | core.ParDo(_RoundRobinKeyFn(min_shards))
            write_result_coll = (keyed_pcoll
                                 | core.WindowInto(window.GlobalWindows())
                                 | core.GroupByKey()
                                 | 'WriteBundles' >> core.ParDo(
                                     _WriteKeyedBundleDoFn(self.sink),
                                     AsSingleton(init_result_coll)))
        else:
            # Runner-chosen bundling: write bundles directly, then funnel
            # all writer results through a single key/GroupByKey so that
            # finalize sees them gathered together.
            min_shards = 1
            write_result_coll = (pcoll
                                 | 'WriteBundles' >>
                                 core.ParDo(_WriteBundleDoFn(self.sink),
                                            AsSingleton(init_result_coll))
                                 | 'Pair' >> core.Map(lambda x: (None, x))
                                 | core.WindowInto(window.GlobalWindows())
                                 | core.GroupByKey()
                                 | 'Extract' >> core.FlatMap(lambda x: x[1]))
        # _finalize_write pads the results up to min_shards with empty
        # shards and calls sink.finalize_write exactly once.
        return do_once | 'finalize_write' >> core.FlatMap(
            _finalize_write,
            self.sink,
            AsSingleton(init_result_coll),
            AsIter(write_result_coll),
            min_shards)
class _WriteBundleDoFn(core.DoFn):
    """A DoFn for writing elements to an iobase.Writer.

    Opens a writer at the first element and closes the writer at finish_bundle().
    """

    def __init__(self, sink):
        # Writer is created lazily on the first element so empty bundles
        # never open (or close) a writer.
        self.writer = None
        self.sink = sink

    def display_data(self):
        return {'sink_dd': self.sink}

    def process(self, element, init_result):
        if self.writer is None:
            # The bundle id must be unique per writer; uuid4 satisfies that.
            self.writer = self.sink.open_writer(init_result, str(uuid.uuid4()))
        self.writer.write(element)

    def finish_bundle(self):
        # NOTE(review): self.writer is not reset to None here; if the
        # runner reuses this DoFn instance for a subsequent bundle the
        # closed writer would be reused -- confirm the runner always
        # constructs a fresh instance per bundle.
        if self.writer is not None:
            yield window.TimestampedValue(self.writer.close(), window.MAX_TIMESTAMP)
class _WriteKeyedBundleDoFn(core.DoFn):
    """Writes one grouped (key, values) element as a complete bundle."""

    def __init__(self, sink):
        self.sink = sink

    def display_data(self):
        return {'sink_dd': self.sink}

    def process(self, element, init_result):
        bundle = element
        writer = self.sink.open_writer(init_result, str(uuid.uuid4()))
        # Note: the loop variable shadows the 'element' parameter; harmless
        # here since the parameter is not used after this point.
        for element in bundle[1]:  # values
            writer.write(element)
        return [window.TimestampedValue(writer.close(), window.MAX_TIMESTAMP)]
def _finalize_write(_, sink, init_result, write_results, min_shards):
    """Creates empty shards up to min_shards and finalizes the sink.

    Invoked once (via FlatMap over a single-element collection).  Yields
    sink.finalize_write() outputs, if any, timestamped at end-of-window.
    """
    write_results = list(write_results)
    extra_shards = []
    if len(write_results) < min_shards:
        # Pad with empty shards so a fixed-shard sink always produces
        # exactly min_shards outputs, even for a small/empty input.
        logging.debug(
            'Creating %s empty shard(s).', min_shards - len(write_results))
        for _ in range(min_shards - len(write_results)):
            writer = sink.open_writer(init_result, str(uuid.uuid4()))
            extra_shards.append(writer.close())
    outputs = sink.finalize_write(init_result, write_results + extra_shards)
    if outputs:
        return (window.TimestampedValue(v, window.MAX_TIMESTAMP) for v in outputs)
class _RoundRobinKeyFn(core.DoFn):
    """Assigns keys 0..count-1 to elements in round-robin order."""

    def __init__(self, count):
        self.count = count

    def start_bundle(self):
        # Start each bundle at a random offset so shards stay balanced
        # across bundles.
        self.counter = random.randint(0, self.count - 1)

    def process(self, element):
        # Advance the counter modulo count; counter is always < count
        # before the increment, so this matches an increment-and-wrap.
        self.counter = (self.counter + 1) % self.count
        yield self.counter, element
# For backwards compatibility.
# pylint: disable=wrong-import-position
from apache_beam.runners.google_cloud_dataflow.native_io.iobase import *
| 0.002792 |
#!/usr/bin/env python
'''Populate with dummy data.'''
from pull import cursor
from random import randint, random
from collections import namedtuple
# Parameterized INSERT for one (x, y, z, value) row of the ``points`` table;
# executed with a 4-tuple of values below.
sql = '''
INSERT INTO points (
x
, y
, z
, value
) VALUES (
%s
, %s
, %s
, %s
)'''

# Immutable 3-D integer coordinate; hashable, so points can be
# deduplicated in a set (see gen_points).
Point = namedtuple('Point', ['x', 'y', 'z'])
def rand_coord():
    """Return a uniformly random integer coordinate in [0, 1000]."""
    return randint(0, 1000)
def rand_point():
    """Return a Point with three independently random coordinates."""
    x, y, z = rand_coord(), rand_coord(), rand_coord()
    return Point(x, y, z)
def gen_points(count):
    """Return a set of exactly ``count`` distinct random Points.

    Duplicates from rand_point() are absorbed by the set, so the loop
    keeps drawing until ``count`` unique points exist.
    """
    points = set()
    while len(points) < count:
        points.add(rand_point())
    return points
if __name__ == '__main__':
    # Separate DB cursors for the "pre" and "post" datasets.
    pre_cur = cursor('pre')
    post_cur = cursor('post')
    points = gen_points(10000)
    for point in points:
        pre_value = random() * 1000
        post_value = pre_value
        # In 1% probability, generate different value
        if random() <= 0.01:
            # Up to 10% difference
            diff = (random() * 0.1) * pre_value
            if random() > 0.5:
                post_value += diff
            else:
                post_value -= diff
        pre_cur.execute(sql, (point.x, point.y, point.z, pre_value))
        post_cur.execute(sql, (point.x, point.y, point.z, post_value))
    # Commit both datasets once, after all rows are inserted.
    pre_cur.connection.commit()
    post_cur.connection.commit()
| 0.000776 |
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import json
import uuid
import httplib2
from apiclient import discovery
from apiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
# 30 days in milliseconds
_EXPIRATION_MS = 30 * 24 * 60 * 60 * 1000
NUM_RETRIES = 3
def create_big_query():
    """Authenticates with cloud platform and gets a BiqQuery service object.

    Uses Application Default Credentials; cache_discovery is disabled to
    avoid the discovery-cache warning/failure with oauth2client.
    """
    creds = GoogleCredentials.get_application_default()
    return discovery.build('bigquery',
                           'v2',
                           credentials=creds,
                           cache_discovery=False)
def create_dataset(biq_query, project_id, dataset_id):
    """Creates a BigQuery dataset; an existing dataset counts as success.

    NOTE(review): first parameter is spelled ``biq_query`` in the original
    API -- kept as-is for keyword-argument compatibility.

    Returns:
      True on success or if the dataset already exists (HTTP 409);
      False on any other HttpError.
    """
    is_success = True
    body = {
        'datasetReference': {
            'projectId': project_id,
            'datasetId': dataset_id
        }
    }
    try:
        dataset_req = biq_query.datasets().insert(projectId=project_id,
                                                  body=body)
        dataset_req.execute(num_retries=NUM_RETRIES)
    except HttpError as http_error:
        if http_error.resp.status == 409:
            print('Warning: The dataset %s already exists' % dataset_id)
        else:
            # Note: For more debugging info, print "http_error.content"
            print('Error in creating dataset: %s. Err: %s' %
                  (dataset_id, http_error))
            is_success = False
    return is_success
def create_table(big_query, project_id, dataset_id, table_id, table_schema,
                 description):
    """Creates a table from a (name, type, description) tuple schema."""
    fields = []
    for field_name, field_type, field_description in table_schema:
        fields.append({
            'name': field_name,
            'type': field_type,
            'description': field_description
        })
    return create_table2(big_query, project_id, dataset_id, table_id, fields,
                         description)
def create_partitioned_table(big_query,
                             project_id,
                             dataset_id,
                             table_id,
                             table_schema,
                             description,
                             partition_type='DAY',
                             expiration_ms=_EXPIRATION_MS):
    """Creates a partitioned table. By default, a date-paritioned table is
    created with each partition lasting 30 days after it was last modified.
    """
    fields = []
    for field_name, field_type, field_description in table_schema:
        fields.append({
            'name': field_name,
            'type': field_type,
            'description': field_description
        })
    return create_table2(big_query, project_id, dataset_id, table_id, fields,
                         description, partition_type, expiration_ms)
def create_table2(big_query,
                  project_id,
                  dataset_id,
                  table_id,
                  fields_schema,
                  description,
                  partition_type=None,
                  expiration_ms=None):
    """Creates a (optionally partitioned) table from a raw fields schema.

    Args:
      fields_schema: list of dicts with 'name'/'type'/'description' keys.
      partition_type/expiration_ms: when both are set, time partitioning
        is configured on the new table.

    Returns:
      True on success or if the table already exists (HTTP 409);
      False on any other HttpError.
    """
    is_success = True
    body = {
        'description': description,
        'schema': {
            'fields': fields_schema
        },
        'tableReference': {
            'datasetId': dataset_id,
            'projectId': project_id,
            'tableId': table_id
        }
    }
    if partition_type and expiration_ms:
        body["timePartitioning"] = {
            "type": partition_type,
            "expirationMs": expiration_ms
        }
    try:
        table_req = big_query.tables().insert(projectId=project_id,
                                              datasetId=dataset_id,
                                              body=body)
        res = table_req.execute(num_retries=NUM_RETRIES)
        print('Successfully created %s "%s"' % (res['kind'], res['id']))
    except HttpError as http_error:
        if http_error.resp.status == 409:
            print('Warning: Table %s already exists' % table_id)
        else:
            print('Error in creating table: %s. Err: %s' %
                  (table_id, http_error))
            is_success = False
    return is_success
def patch_table(big_query, project_id, dataset_id, table_id, fields_schema):
    """Patches an existing table's schema in place.

    Args:
      fields_schema: list of dicts with 'name'/'type'/'description' keys.

    Returns:
      True on success, False on any HttpError.
    """
    is_success = True
    body = {
        'schema': {
            'fields': fields_schema
        },
        'tableReference': {
            'datasetId': dataset_id,
            'projectId': project_id,
            'tableId': table_id
        }
    }
    try:
        table_req = big_query.tables().patch(projectId=project_id,
                                             datasetId=dataset_id,
                                             tableId=table_id,
                                             body=body)
        res = table_req.execute(num_retries=NUM_RETRIES)
        print('Successfully patched %s "%s"' % (res['kind'], res['id']))
    except HttpError as http_error:
        # Bug fix: the message previously said "creating table", which is
        # misleading when diagnosing a failed patch.
        print('Error in patching table: %s. Err: %s' % (table_id, http_error))
        is_success = False
    return is_success
def insert_rows(big_query, project_id, dataset_id, table_id, rows_list):
    """Streams rows into a table via tabledata().insertAll().

    Args:
      rows_list: list of row dicts as produced by make_row().

    Returns:
      True if the insert request succeeded with no per-row insertErrors.
    """
    is_success = True
    body = {'rows': rows_list}
    try:
        insert_req = big_query.tabledata().insertAll(projectId=project_id,
                                                     datasetId=dataset_id,
                                                     tableId=table_id,
                                                     body=body)
        res = insert_req.execute(num_retries=NUM_RETRIES)
        # insertAll can return HTTP 200 while individual rows failed;
        # treat any reported insertErrors as failure.
        if res.get('insertErrors', None):
            print('Error inserting rows! Response: %s' % res)
            is_success = False
    except HttpError as http_error:
        print('Error inserting rows to the table %s' % table_id)
        print('Error message: %s' % http_error)
        is_success = False
    return is_success
def sync_query_job(big_query, project_id, query, timeout=5000):
    """Runs a synchronous BigQuery query job.

    Args:
      query: SQL query string.
      timeout: milliseconds to wait for the job (BigQuery 'timeoutMs').

    Returns:
      The query job response dict, or None if the request raised HttpError.
    """
    query_data = {'query': query, 'timeoutMs': timeout}
    query_job = None
    try:
        query_job = big_query.jobs().query(
            projectId=project_id,
            body=query_data).execute(num_retries=NUM_RETRIES)
    except HttpError as http_error:
        print('Query execute job failed with error: %s' % http_error)
        print(http_error.content)
    return query_job
# List of (column name, column type, description) tuples
def make_row(unique_row_id, row_values_dict):
    """Builds one entry for a tabledata().insertAll() request.

    Args:
      unique_row_id: deduplication id for the row.
      row_values_dict: dictionary of column name and column value.
    """
    return {
        'insertId': unique_row_id,
        'json': row_values_dict,
    }
| 0.000283 |
import serial
from time import sleep

# Open the target device's serial port at 2.5 Mbaud.
s = serial.Serial(port='com7', baudrate=2500000)

# Single-byte header sent before each frame.
magic_cookie = bytearray([0x01])

# One 32-byte strip of pixel data (alternating 0x00/0x88 groups of four).
image_data = bytearray([0x00, 0x00, 0x00, 0x00, 0x88, 0x88, 0x88, 0x88, 0x00,
                        0x00, 0x00, 0x00, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00,
                        0x00, 0x00, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00,
                        0x00, 0x88, 0x88, 0x88, 0x88])
print(type(image_data))
print(image_data)
# image_data = str(image_data)
# iamge_data = bytes(image_data)

# Send 500 frames: header, then 128*3 copies of the strip per frame,
# rotating the strip by one byte between frames to animate the pattern.
for i in range(500):
    sleep(0.015)
    s.write(magic_cookie)
    for j in range(128 * 3):
        s.write(image_data)
    image_data.append(image_data.pop(0))

# image_data = bytearray(
#     [0x00, 0x00, 0x00, 0x00, 0x88, 0x88, 0x88, 0x88, 0x00,
#      0x00, 0x00, 0x00, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00,
#      0x00, 0x00, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00,
#      0x00, 0x88, 0x88, 0x88, 0x88])
#
# for i in range(500):
#     sleep(0.015)
#     s.write(magic_cookie)
#     for j in range(128 * 3):
#         s.write(image_data)
# NOTE(review): this trailing rotate appears to be a leftover from the
# commented-out loop above; it only rotates the buffer once more after
# the script is done sending -- confirm it can be removed.
image_data.append(image_data.pop(0))
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe import throw, _
from frappe.model.document import Document
from frappe.email.queue import check_email_limit
from frappe.utils.verified_command import get_signed_params, verify_request
from frappe.utils.background_jobs import enqueue
from frappe.utils.scheduler import log
from frappe.email.queue import send
from frappe.email.doctype.email_group.email_group import add_subscribers
class Newsletter(Document):
    """Controller for the Newsletter doctype: queues bulk email sends."""

    def autoname(self):
        # The document name mirrors the newsletter subject.
        self.name = self.subject

    def onload(self):
        # Surface per-status counts of the queued emails on the form.
        if self.email_sent:
            self.get("__onload").status_count = dict(frappe.db.sql("""select status, count(name)
                from `tabEmail Queue` where reference_doctype=%s and reference_name=%s
                group by status""", (self.doctype, self.name))) or None

    def test_send(self, doctype="Lead"):
        # Send only to the comma-separated test address list, bypassing
        # the email group.
        self.recipients = frappe.utils.split_emails(self.test_email_id)
        self.queue_all()
        frappe.msgprint(_("Scheduled to send to {0}").format(self.test_email_id))

    def send_emails(self):
        """send emails to leads and customers"""
        if self.email_sent:
            throw(_("Newsletter has already been sent"))
        self.recipients = self.get_recipients()
        if getattr(frappe.local, "is_ajax", False):
            self.validate_send()
            # using default queue with a longer timeout as this isn't a scheduled task
            enqueue(send_newsletter, queue='default', timeout=3000, event='send_newsletter', newsletter=self.name)
        else:
            self.queue_all()
        frappe.msgprint(_("Scheduled to send to {0} recipients").format(len(self.recipients)))
        # Mark sent immediately; the background job resets this flag on failure.
        frappe.db.set(self, "email_sent", 1)

    def queue_all(self):
        if not self.get("recipients"):
            # in case it is called via worker
            self.recipients = self.get_recipients()
            self.validate_send()
        sender = self.send_from or frappe.utils.get_formatted_email(self.owner)
        # Auto-commit in batches for large recipient lists (skipped in tests
        # so test transactions stay intact).
        if not frappe.flags.in_test:
            frappe.db.auto_commit_on_many_writes = True
        send(recipients = self.recipients, sender = sender,
            subject = self.subject, message = self.message,
            reference_doctype = self.doctype, reference_name = self.name,
            unsubscribe_method = "/api/method/frappe.email.doctype.newsletter.newsletter.unsubscribe",
            unsubscribe_params = {"name": self.email_group},
            send_priority = 0)
        if not frappe.flags.in_test:
            frappe.db.auto_commit_on_many_writes = False

    def get_recipients(self):
        """Get recipients from Email Group"""
        return [d.email for d in frappe.db.get_all("Email Group Member", ["email"],
            {"unsubscribed": 0, "email_group": self.email_group})]

    def validate_send(self):
        if self.get("__islocal"):
            throw(_("Please save the Newsletter before sending"))
        check_email_limit(self.recipients)
@frappe.whitelist(allow_guest=True)
def unsubscribe(email, name):
    """Guest endpoint: unsubscribe ``email`` from Email Group ``name``.

    Silently ignores requests without a valid signature (see
    get_signed_params / verify_request).
    """
    if not verify_request():
        return
    subs_id = frappe.db.get_value("Email Group Member", {"email": email, "email_group": name})
    if subs_id:
        subscriber = frappe.get_doc("Email Group Member", subs_id)
        subscriber.unsubscribed = 1
        subscriber.save(ignore_permissions=True)
        frappe.db.commit()
    # Show the confirmation page even if the member was already removed.
    return_unsubscribed_page(email)
def return_unsubscribed_page(email):
    """Renders the standard 'Unsubscribed' web page for ``email``."""
    frappe.respond_as_web_page(_("Unsubscribed"), _("{0} has been successfully unsubscribed from this list.").format(email))
def create_lead(email_id):
    """create a lead if it does not exist"""
    from email.utils import parseaddr
    from frappe.model.naming import get_default_naming_series
    # email_id may be of the form "Real Name <addr@example.com>".
    real_name, email_id = parseaddr(email_id)
    if frappe.db.get_value("Lead", {"email_id": email_id}):
        # A lead with this address already exists; nothing to do.
        return
    lead = frappe.get_doc({
        "doctype": "Lead",
        "email_id": email_id,
        "lead_name": real_name or email_id,
        "status": "Lead",
        "naming_series": get_default_naming_series("Lead"),
        "company": frappe.db.get_default("Company"),
        "source": "Email"
    })
    lead.insert()
@frappe.whitelist(allow_guest=True)
def subscribe(email):
    """Guest endpoint: email a signed double-opt-in confirmation link."""
    # The signature in the URL lets confirm_subscription verify the request.
    url = frappe.utils.get_url("/api/method/frappe.email.doctype.newsletter.newsletter.confirm_subscription") +\
        "?" + get_signed_params({"email": email})
    messages = (
        _("Thank you for your interest in subscribing to our updates"),
        _("Please verify your email id"),
        url,
        _("Click here to verify")
    )
    content = """
    <p>{0}. {1}.</p>
    <p><a href="{2}">{3}</a></p>
    """
    frappe.sendmail(email, subject=_("Confirm Your Email"), content=content.format(*messages))
@frappe.whitelist(allow_guest=True)
def confirm_subscription(email):
    """Guest endpoint hit from the confirmation link sent by subscribe().

    Verifies the signed request, creates the default "Website" Email Group
    on first use, and adds the address to it.
    """
    if not verify_request():
        return
    if not frappe.db.exists("Email Group", _("Website")):
        frappe.get_doc({
            "doctype": "Email Group",
            "title": _("Website")
        }).insert(ignore_permissions=True)
    frappe.flags.ignore_permissions = True
    add_subscribers(_("Website"), email)
    frappe.db.commit()
    frappe.respond_as_web_page(_("Confirmed"), _("{0} has been successfully added to our Email Group.").format(email))
def send_newsletter(newsletter):
    """Background job: queue all emails for the given Newsletter name.

    On failure, rolls back, clears the newsletter's email_sent flag (set
    optimistically by send_emails), logs, and re-raises so the job is
    recorded as failed.
    """
    # Bug fix: 'doc' must exist before the try block -- if get_doc itself
    # raises, the old code hit an UnboundLocalError on doc.db_set in the
    # except handler, masking the real error.
    doc = None
    try:
        doc = frappe.get_doc("Newsletter", newsletter)
        doc.queue_all()
    except:  # intentionally broad: any failure must roll back and re-raise
        frappe.db.rollback()
        # wasn't able to send emails :(
        if doc is not None:
            doc.db_set("email_sent", 0)
        frappe.db.commit()
        log("send_newsletter")
        raise
    else:
        frappe.db.commit()
| 0.028653 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wanopt_peer
short_description: Configure WAN optimization peers in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wanopt feature and peer category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wanopt_peer:
description:
- Configure WAN optimization peers.
default: null
type: dict
suboptions:
ip:
description:
- Peer IP address.
type: str
peer_host_id:
description:
- Peer host ID.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure WAN optimization peers.
fortios_wanopt_peer:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
wanopt_peer:
ip: "<your_own_value>"
peer_host_id: "myhostname"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a legacy fortiosapi session with the credentials in *data*.

    Turns on client debug output, selects HTTP or HTTPS transport from
    the optional 'https' flag (default: HTTPS), then authenticates.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    verify = data['ssl_verify']
    fos.debug('on')
    # HTTPS stays enabled unless the flag is present and explicitly falsy.
    https_enabled = ('https' not in data) or bool(data['https'])
    fos.https('on' if https_enabled else 'off')
    fos.login(host, username, password, verify=verify)
def filter_wanopt_peer_data(json):
    """Return a copy of *json* restricted to the module's known options.

    Keys that are absent or explicitly None are dropped so they are not
    sent to the FortiGate API.

    :param json: the raw ``wanopt_peer`` option dict from the playbook
    :return: a new dict containing only recognized, non-None entries
    """
    option_list = ['ip', 'peer_host_id']
    # Dict comprehension replaces the manual accumulate-in-a-loop form.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from ``foo_bar`` to ``foo-bar``.

    FortiOS expects hyphenated attribute names while Ansible options use
    underscores.  Non-dict/list leaves are returned unchanged.

    :param data: arbitrarily nested dict/list/scalar structure
    :return: an equivalent structure with hyphenated dict keys
    """
    if isinstance(data, list):
        # Bug fix: the original rebound the loop variable (``elem = ...``),
        # which never wrote the converted value back, so structures nested
        # inside lists were silently left with underscored keys.
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def wanopt_peer(data, fos):
    """Apply the desired wanopt/peer state on the device.

    'present' issues a set (create or update); 'absent' deletes the
    object keyed by its hyphenated peer-host-id.
    """
    vdom = data['vdom']
    state = data['state']
    payload = underscore_to_hyphen(filter_wanopt_peer_data(data['wanopt_peer']))
    if state == "present":
        return fos.set('wanopt', 'peer', data=payload, vdom=vdom)
    if state == "absent":
        return fos.delete('wanopt', 'peer',
                          mkey=payload['peer-host-id'], vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiGate API response means success.

    A 404 answered to a DELETE is also treated as success: the object
    to remove is already gone.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_wanopt(data, fos):
    """Dispatch the wanopt_peer request and normalise the API response.

    :return: tuple (is_error, changed, response)

    NOTE(review): if ``data['wanopt_peer']`` is falsy, ``resp`` is never
    bound and the return line raises NameError.  Preserved as-is; the
    argument spec supplies the key in practice.
    """
    if data['wanopt_peer']:
        resp = wanopt_peer(data, fos)
    return (not is_successful_status(resp),
            resp['status'] == "success",
            resp)
def main():
    """Module entry point.

    Builds the Ansible argument spec, selects a transport (HTTPAPI
    connection plugin, or the legacy fortiosapi client when host
    credentials are given inline) and pushes the wanopt_peer change.
    """
    # Argument spec; mirrors the DOCUMENTATION string above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "wanopt_peer": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "ip": {"required": False, "type": "str"},
                "peer_host_id": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
        'username' in module.params and module.params['username'] is not None and \
        'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI path: reuse the persistent connection Ansible set up.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_wanopt(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: talk to the device directly with fortiosapi.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_wanopt(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" is the stock failure message used
        # across the generated fortios modules.
        module.fail_json(msg="Error in repo", meta=result)
| 0.001437 |
#!/usr/bin/python
import sys, getopt
import xml.etree.ElementTree as etree
def printhelp():
    """Print the command-line usage text for this tool."""
    lines = (
        'Tool for extracting node positions from .csc files.',
        'Node IDs have to be in strict ascending order in the .csc file.',
        'Node IDs should start with 1.',
        '',
        'Usage:',
        '  position.py -i <inputfile>',
        '',
        'Options:',
        '  -h --help                Show this screen.',
        '  -i --input=<inputfile>   The file to parse.',
    )
    for line in lines:
        print(line)
def main(argv):
    """Parse a Cooja .csc file and print node position macros.

    Validates that every mote has exactly two interface_config entries
    and that mote IDs form the strictly ascending sequence 1, 2, 3, ...
    Exits with status 2 on any validation or option error.
    """
    inputfile = ''
    string = 'NODE_POSITION_ARRAY = '
    # idcheck tracks the last mote ID seen; -1 means "none seen yet".
    idcheck = -1
    try:
        opts, args = getopt.getopt(argv,"hi:",["help", "input="])
    except getopt.GetoptError:
        printhelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            printhelp()
            sys.exit()
        elif opt in ("-i", "--input"):
            inputfile = arg
        else:
            printhelp()
            sys.exit()
    if(len(opts) == 0):
        printhelp()
        sys.exit()
    #parse XML into etree
    tree = etree.parse(inputfile)
    #get root
    root = tree.getroot()
    #find all mote nodes
    for mote in root.find('simulation').findall('mote'):
        # assumes interface_config[0] carries the position (x/y) and
        # interface_config[1] the mote ID -- TODO confirm against the
        # .csc files this tool targets.
        conf = mote.findall('interface_config')
        if(len(conf) != 2):
            print('Input is not a valid csc file!')
            sys.exit(2)
        if(idcheck == -1):
            idcheck = int(conf[1].find('id').text)
            if(idcheck != 1):
                print('IDs should start with 1!')
                sys.exit(2)
        elif(int(conf[1].find('id').text) == idcheck + 1):
            idcheck = idcheck + 1
        else:
            print('IDs are not in strict ascending order!')
            sys.exit(2)
        # Warn when coordinates are not integral; they are rounded below.
        if(conf[0].find('x').text[-2:] != '.0' or conf[0].find('y').text[-2:] != '.0'):
            print('#Attention: Decimal places were rounded: {' + conf[0].find('x').text + ',' + conf[0].find('y').text + '}')
        string += '{' + str(int(round(float(conf[0].find('x').text)))) + ',' + str(int(round(float(conf[0].find('y').text)))) + '},'
    # Strip the trailing comma before printing the array.
    print(string[:-1])
    print('NODE_POSITION_ARRAY_LENGTH = ' + str(len(root.find('simulation').findall('mote'))))
    print('POSITION_ARRAY_RIMEADDR_OFFSET = 1')
if __name__ == "__main__":
    main(sys.argv[1:])
| 0.034696 |
#
# Formatting tasks
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.modules.common.task import Task
from pyanaconda.anaconda_loggers import get_module_logger
import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev
# Module-level logger shared by the task classes below.
log = get_module_logger(__name__)
# Public names exported by this module.
__all__ = ["FindFormattableDASDTask", "DASDFormatTask"]
class FindFormattableDASDTask(Task):
    """A task for finding DASDs for formatting.

    Collects unformatted DASDs and LDL DASDs from the given disks,
    honoring the two permission flags passed at construction time.
    """

    def __init__(self, disks, can_format_unformatted=False, can_format_ldl=False):
        """Create a new task.

        :param disks: a list of disks to search
        :param can_format_unformatted: can we format unformatted?
        :param can_format_ldl: can we format LDL?
        """
        super().__init__()
        self._disks = disks
        self._can_format_unformatted = can_format_unformatted
        self._can_format_ldl = can_format_ldl

    @property
    def name(self):
        """Name of the task."""
        return "Finding DASDs for formatting"

    def run(self):
        """Run the task.

        :return: a deduplicated list of disks to format
        """
        return list(set(
            self._get_unformatted_dasds(self._disks)
            + self._get_ldl_dasds(self._disks)
        ))

    def _collect_dasds(self, disks, allowed, predicate, label):
        """Collect disks matching *predicate*, if formatting is allowed.

        Shared helper; the original class duplicated this loop in both
        _get_* methods.

        :param disks: disks to scan
        :param allowed: is formatting this kind of DASD permitted?
        :param predicate: callable deciding whether a disk qualifies
        :param label: human-readable kind used in log messages
        """
        if not allowed:
            log.debug("We are not allowed to format %s DASDs.", label)
            return []

        result = []
        for disk in disks:
            if predicate(disk):
                log.debug("Found %s DASD: %s (%s)", label, disk.path, disk.busid)
                result.append(disk)
        return result

    def _get_unformatted_dasds(self, disks):
        """Returns a list of unformatted DASDs."""
        return self._collect_dasds(
            disks, self._can_format_unformatted,
            self._is_unformatted_dasd, "unformatted"
        )

    def _get_ldl_dasds(self, disks):
        """Returns a list of LDL DASDs."""
        return self._collect_dasds(
            disks, self._can_format_ldl, self._is_ldl_dasd, "LDL"
        )

    def _is_unformatted_dasd(self, disk):
        """Is it an unformatted DASD?"""
        return self._is_dasd(disk) and blockdev.s390.dasd_needs_format(disk.busid)

    def _is_ldl_dasd(self, disk):
        """Is it an LDL DASD?"""
        return self._is_dasd(disk) and blockdev.s390.dasd_is_ldl(disk.name)

    def _is_dasd(self, disk):
        """Is it a DASD disk?"""
        return disk.type == "dasd"
class DASDFormatTask(Task):
    """Format a set of DASD disks, reporting per-disk progress."""

    def __init__(self, dasds):
        """Create a new task.

        :param dasds: a list of names of DASDs to format
        """
        super().__init__()
        self._dasds = dasds

    @property
    def name(self):
        return "Formatting DASDs"

    def run(self):
        """Format every configured DASD in turn."""
        for name in self._dasds:
            self._do_format(name)

    def _do_format(self, disk_name):
        """Format one DASD; failures are reported and logged, not raised."""
        try:
            self.report_progress("Formatting {}".format(disk_name))
            blockdev.s390.dasd_format(disk_name)
        except blockdev.S390Error as err:
            self.report_progress("Failed formatting {}".format(disk_name))
            log.error(err)
| 0.000949 |
# Macros
#
# Shared description strings reused by many of the e() error
# definitions below.  Trailing backslashes join the lines into a
# single-line message.
CODING_BUG = """It looks like you've hit a bug in the server. Please, \
do not hesitate to report it at http://bugs.cherokee-project.com/ so \
the developer team can fix it."""
UNKNOWN_CAUSE = """An unexpected error has just occurred in the \
server. The cause of the issue is unknown. Please, do not hesitate to \
report it at http://bugs.cherokee-project.com/ so the developer team \
can fix it."""
SYSTEM_ISSUE = """The issue seems to be related to your system."""
BROKEN_CONFIG = """The configuration file seems to be broken."""
INTERNAL_ISSUE = """The server found an internal problem. """
# cherokee/proxy_host.c
#
e('PROXY_HEADER_PARSE',
title = "Could not parse header from the back-end",
desc = "It looks like the back-end server sent a malformed HTTP response.",
debug = "Dump of the header buffer (len=%d): %s")
# cherokee/source.c
#
e('SOURCE_NONBLOCK',
title = "Failed to set nonblocking (fd=%d): ${errno}",
desc = SYSTEM_ISSUE)
# cherokee/rrd_tools.c
#
e('RRD_NO_BINARY',
title = "Could not find the rrdtool binary.",
desc = "A custom rrdtool binary has not been defined, and the server could not find one in the $PATH.",
debug = "PATH=%s",
admin = '/general#tabs_general-0',
show_bt = False)
e('RRD_EXECV',
title = "execv failed cmd='%s': ${errno}",
desc = SYSTEM_ISSUE)
e('RRD_FORK',
title = "Fork failed pid=%d: ${errno}",
desc = SYSTEM_ISSUE)
e('RRD_WRITE',
title = "Cannot write in %s: ${errno}",
desc = SYSTEM_ISSUE)
e('RRD_DIR_PERMS',
title = "Insufficient permissions to work with the RRD directory: %s",
desc = SYSTEM_ISSUE,
admin = '/general#tabs_general-0')
e('RRD_MKDIR_WRITE',
title = "Cannot create the '%s' directory",
desc = SYSTEM_ISSUE,
admin = '/general#tabs_general-0')
# cherokee/balancer_ip_hash.c
#
e('BALANCER_IP_REACTIVE',
title = "Taking source='%s' back on-line: %d active.",
desc = "The server is re-enabling one of the Information Sources.")
e('BALANCER_IP_DISABLE',
title = "Taking source='%s' off-line. Active %d.",
desc = "The server is disabling one of the Information Sources.")
e('BALANCER_IP_EXHAUSTED',
title = "Sources exhausted: re-enabling one.",
desc = "All the information sources are disabled at this moment. Cherokee needs to re-enable at least one.")
# cherokee/balancer_failover.c
#
e('BALANCER_FAILOVER_REACTIVE',
title = "Taking source='%s' back on-line.",
desc = "The server is re-enabling one of the Information Sources.")
e('BALANCER_FAILOVER_DISABLE',
title = "Taking source='%s' off-line.",
desc = "The server is disabling one of the Information Sources.")
e('BALANCER_FAILOVER_ENABLE_ALL',
title = "Taking all sources back on-line.",
desc = "All the Information Sources have been off-lined. The server is re-enabling all of them in order to start over again.")
# cherokee/resolv_cache.c
#
e('RESOLVE_TIMEOUT',
title = "Timed out while resolving '%s'",
desc = "For some reason, Cherokee could not resolve the hostname.")
# cherokee/validator_authlist.c
#
e('VALIDATOR_AUTHLIST_USER',
title = "Could not read 'user' value for '%s'",
desc = BROKEN_CONFIG)
e('VALIDATOR_AUTHLIST_PASSWORD',
title = "Could not read 'password' value for '%s'",
desc = BROKEN_CONFIG)
e('VALIDATOR_AUTHLIST_EMPTY',
title = "Empty authlist: Access will be denied.",
desc = "The access to this resource will be denied as long as the list of allowed users is empty.")
# cherokee/validator_pam.c
#
# Typo fix in the user-facing description: "propertly" -> "properly".
e('VALIDATOR_PAM_DELAY',
  title = "Setting pam fail delay failed",
  desc  = "Cherokee could not configure PAM properly. Most likely you have found an incompatibility issue between Cherokee and your system PAM library.")
e('VALIDATOR_PAM_AUTH',
title = "User '%s' - not authenticated: %s",
desc = "Most likely the password did not match")
e('VALIDATOR_PAM_ACCOUNT',
title = "User '%s' - invalid account: %s",
desc = "The specified user does not exist on the system.")
# cherokee/validator_ldap.c
#
e('VALIDATOR_LDAP_KEY',
title = "Validator LDAP: Unknown key: '%s'",
desc = BROKEN_CONFIG)
e('VALIDATOR_LDAP_PROPERTY',
title = "The LDAP validation module requires a '%s' property",
desc = "It looks like you did not fill a required property. Check the LDAP details and try again.")
e('VALIDATOR_LDAP_SECURITY',
title = "Security problem found in LDAP validation config",
desc = "LDAP validator: Potential security problem found: anonymous bind validation. Check (RFC 2251, section 4.2.2)")
e('VALIDATOR_LDAP_CONNECT',
title = "Could not connect to LDAP: %s:%d: '${errno}'",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_V3',
title = "Could not set the LDAP version 3: %s",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_CA',
title = "Could not set CA file %s: %s",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_STARTTLS',
title = "cannot StartTLS, it is not supported by LDAP client libraries",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_BIND',
title = "Could not bind (%s:%d): %s:%s : %s",
desc = SYSTEM_ISSUE)
e('VALIDATOR_LDAP_SEARCH',
title = "Could not search in LDAP server: %s",
desc = SYSTEM_ISSUE)
# cherokee/validator_file.c
#
e('VALIDATOR_FILE',
title = "Unknown path type '%s'",
desc = BROKEN_CONFIG)
# Message fixes: "validation modules" -> "validation module",
# "authorizated" -> "authorized", and removal of the dangling
# sentence fragment "The configuration specifies.".
e('VALIDATOR_FILE_NO_FILE',
  title = "File based validators need a password file",
  desc = "This validation module reads a local file in order to get the authorized user list. Please try to reconfigure the details and ensure a filename is provided.")
# cherokee/validator.c
#
e('VALIDATOR_METHOD_UNKNOWN',
title = "Unknown authentication method '%s'",
desc = BROKEN_CONFIG)
# cherokee/handler_*.c
#
e('HANDLER_REGEX_GROUPS',
title = "Too many groups in the regex",
desc = "The specified regular expression is wrong. Please double check it.")
e('HANDLER_NO_BALANCER',
title = "The handler needs a balancer",
desc = BROKEN_CONFIG)
# cherokee/handler_secdownload.c
#
e('HANDLER_SECDOWN_SECRET',
title = "Handler secdownload needs a secret",
desc = "You must define a passphrase to be used as shared secret between the Hidden Downloads handler and the script you use to generate the URLs.")
# cherokee/handler_server_info.c
#
e('HANDLER_SRV_INFO_MOD',
title = "Unknown module type (%d)",
desc = CODING_BUG)
e('HANDLER_SRV_INFO_TYPE',
title = "Unknown ServerInfo type: '%s'",
desc = "Your configuration file is either broken, obsolete or has been tampered with. You need to reconfigure the verbosity of your ServerInfo handler.")
# cherokee/handler_file.c
#
e('HANDLER_FILE_TIME_PARSE',
title = "Unparseable time '%s'")
# cherokee/handler_ssi.c
#
e('HANDLER_SSI_PROPERTY',
title = "Unknown SSI property: '%s'",
desc = BROKEN_CONFIG)
# cherokee/handler_fcgi.c
#
e('HANDLER_FCGI_VERSION',
title = "Parsing error: unknown version")
e('HANDLER_FCGI_PARSING',
title = "Parsing error: unknown type")
e('HANDLER_FCGI_STDERR',
title = "%s")
e('HANDLER_FCGI_BALANCER',
title = "Found a FastCGI handler without a Load Balancer",
desc = BROKEN_CONFIG)
# cherokee/handler_error_redir.c
#
e('HANDLER_ERROR_REDIR_CODE',
title = "Wrong error code: '%s'",
desc = BROKEN_CONFIG)
e('HANDLER_ERROR_REDIR_URL',
title = "HTTP Error %d redirection: An 'url' property is required",
desc = BROKEN_CONFIG)
# cherokee/handler_dirlist.c
#
e('HANDLER_DIRLIST_THEME',
title = "Could not load theme '%s': %s",
desc = "Either the directory where your theme resides has been deleted, or the permissions are wrong.")
# Typo fix: "inadvertedly" -> "inadvertently".
e('HANDLER_DIRLIST_BAD_THEME',
  title = "The theme is incomplete",
  desc = "Most likely someone has inadvertently deleted some of the files of your theme. Please try to restore the files or change your theme selection.")
# cherokee/handler_post_report.c
#
e('HANDLER_POST_REPORT_LANG',
title = "Unrecognized language '%s'",
desc = "Cherokee's POST status reporter supports a number of output languages and formats, including: JSON, Python, PHP and Ruby.")
# cherokee/handler_dbslayer.c
#
e('HANDLER_DBSLAYER_LANG',
title = "Unrecognized language '%s'",
desc = "Cherokee's DBSlayer supports a number of output languages and formats, including: JSON, Python, PHP and Ruby. Please reconfigure the DBSlayer rule to match one of those.")
e('HANDLER_DBSLAYER_BALANCER',
title = "DBSlayer handler needs a balancer",
desc = "The DBSlayer handler needs must specify a load balancing strategy and a list of target hosts to attend the load. At least one host is required. Please ensure it is correctly configured.")
# cherokee/handler_custom_error.c
#
e('HANDLER_CUSTOM_ERROR_HTTP',
title = "Handler custom error needs an HTTP error value.",
desc = BROKEN_CONFIG)
# cherokee/handler_cgi.c
#
e('HANDLER_CGI_SET_PROP',
title = "Setting pipe properties fd=%d: '${errno}'",
desc = SYSTEM_ISSUE)
e('HANDLER_CGI_SETID',
title = "%s: could not set UID %d",
desc = "Most probably the server is not running as root, and therefore it cannot switch to a new user. If you want Cherokee to be able to change use UID to execute CGIs, you'll have to run it as root.")
e('HANDLER_CGI_EXECUTE',
title = "Could not execute '%s': %s",
desc = SYSTEM_ISSUE)
e('HANDLER_CGI_GET_HOSTNAME',
title = "Error getting host name.",
desc = SYSTEM_ISSUE)
# cherokee/config_entry.c
#
e('CONFIG_ENTRY_BAD_TYPE',
title = "Wrong plug-in: The module must implement a handler.",
desc = "The server tried to set a handler, but the loaded plug-in contained another sort of module.")
# cherokee/balancer_*.c
#
e('BALANCER_EMPTY',
title = "The Load Balancer cannot be empty",
desc = BROKEN_CONFIG)
e('BALANCER_UNDEFINED',
title = "Balancer defined without a value",
desc = BROKEN_CONFIG)
e('BALANCER_NO_KEY',
title = "Balancer: No '%s' log has been defined.",
desc = BROKEN_CONFIG)
e('BALANCER_BAD_SOURCE',
title = "Could not find source '%s'",
desc = "For some reason the load balancer module is using a missing Information Source. Please recheck that it uses a correct one.",
admin = "/source")
e('BALANCER_ONLINE_SOURCE',
title = "Taking source='%s' back on-line",
desc = "The information source is being re-enabled.")
# Copy-paste bug fix: this entry reported "back on-line" although it is
# emitted when a source is disabled (compare BALANCER_ONLINE_SOURCE
# above and BALANCER_FAILOVER_DISABLE).
e('BALANCER_OFFLINE_SOURCE',
  title = "Taking source='%s' off-line",
  desc = "The information source is being disabled.")
e('BALANCER_EXHAUSTED',
title = "Sources exhausted: re-enabling one.",
desc = "All the Information Sources have been off-lined. The server needs to re-enable at least one of them.")
# cherokee/encoder_*.c
#
e('ENCODER_NOT_SET_VALUE',
title = "Encoder init error",
desc = "The server did not found a valid initialization value for the encoder",
debug = "%s")
e('ENCODER_DEFLATEINIT2',
title = "deflateInit2(): %s",
desc = SYSTEM_ISSUE)
e('ENCODER_DEFLATEEND',
title = "deflateEnd(): %s",
desc = SYSTEM_ISSUE)
e('ENCODER_DEFLATE',
title = "deflate(): err=%s, avail=%d",
desc = SYSTEM_ISSUE)
# cherokee/logger_*.c
#
e('LOGGER_NO_KEY',
title = "Logger: No '%s' log has been defined.",
desc = BROKEN_CONFIG)
e('LOGGER_NO_WRITER',
title = "Logger writer type is required.",
desc = BROKEN_CONFIG)
e('LOGGER_WRITER_UNKNOWN',
title = "Unknown logger writer type '%s'",
desc = BROKEN_CONFIG)
e('LOGGER_WRITER_READ',
title = "Logger writer (%s): Could not read the filename.",
desc = "A property of the log writer is missing. Odds are you selected to write the output of the log into a file but you did not define it.")
e('LOGGER_WRITER_APPEND',
title = "Could not open '%s' for appending",
desc = "This is probably related to the file permissions. Please make sure that it is writable for the user under which Cherokee is run.")
e('LOGGER_WRITER_ALLOC',
title = "Allocation logger->max_bufsize %d failed.",
desc = "The system might have run out of memory.")
e('LOGGER_WRITER_PIPE',
title = "Could not create pipe (errno=%d): ${errno}",
desc = SYSTEM_ISSUE)
e('LOGGER_WRITER_FORK',
title = "Could not fork (errno=%d): ${errno}",
desc = SYSTEM_ISSUE)
e('LOGGER_X_REAL_IP_PARSE',
title = "Could not parse X-Real-IP access list",
desc = "You must define an access list in order to activate the X-Real-IP support.")
# cherokee/logger_custom.c
#
e('LOGGER_CUSTOM_NO_TEMPLATE',
title = "A template is needed for logging connections: %s",
desc = "Since you are trying to use a custom logging template, providing the template is mandatory.")
e('LOGGER_CUSTOM_TEMPLATE',
title = "Could not parse custom log: '%s'",
desc = "The server found a problem while processing the logging template. Please ensure it is correct.")
# cherokee/fdpoll-epoll.c
#
e('FDPOLL_EPOLL_CTL_ADD',
title = "epoll_ctl: ep_fd %d, fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_EPOLL_CTL_DEL',
title = "epoll_ctl: ep_fd %d, fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_EPOLL_CTL_MOD',
title = "epoll_ctl: ep_fd %d, fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_EPOLL_CREATE',
title = "epoll_create: %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_EPOLL_CLOEXEC',
title = "Could not set CloseExec to the epoll descriptor: fcntl: '${errno}'",
desc = SYSTEM_ISSUE)
# cherokee/fdpoll-port.c
#
e('FDPOLL_PORTS_FD_ASSOCIATE',
title = "fd_associate: fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_PORTS_ASSOCIATE',
title = "port_associate: fd %d: '${errno}'",
desc = SYSTEM_ISSUE)
e('FDPOLL_PORTS_GETN',
title = "port_getn: '${errno}'",
desc = SYSTEM_ISSUE)
# cherokee/fdpoll-poll.c
#
# Typo fix: "usaully" -> "usually".
e('FDPOLL_POLL_FULL',
  title = "The FD Poll is full",
  desc = "The server reached the file descriptor limit. This usually happens when many simultaneous connections are kept open. Try to increase this limit.",
  admin = "/advanced#Resources-2")
e('FDPOLL_POLL_DEL',
title = "Could not remove fd %d (idx=%d) from the poll",
desc = CODING_BUG)
# cherokee/fdpoll-kqueue.c
#
e('FDPOLL_KQUEUE',
title = "kevent returned: '${errno}'",
desc = SYSTEM_ISSUE)
# cherokee/gen_evhost.c
#
e('GEN_EVHOST_TPL_DROOT',
title = "EvHost needs a 'tpl_document_root property'",
desc = BROKEN_CONFIG)
# Typo fix: "definining" -> "defining".
e('GEN_EVHOST_PARSE',
  title = "EvHost: Could not parse template '%s'",
  desc = "Could not parse the template defining how virtual servers are located. You need to re-define the Dynamic Document Root for your Advanced Virtual Hosting.")
# cherokee/vrule_*.c
#
e('VRULE_NO_PROPERTY',
title = "Virtual Server Rule prio=%d needs a '%s' property",
desc = BROKEN_CONFIG)
# cherokee/vrule_target_ip.c
#
e('VRULE_TARGET_IP_PARSE',
title = "Could not parse 'to' entry: '%s'",
desc = BROKEN_CONFIG)
# cherokee/vrule_rehost.c
#
e('VRULE_REHOST_NO_DOMAIN',
title = "Virtual Server '%s' regex vrule needs a 'domain' entry",
desc = BROKEN_CONFIG)
# cherokee/rule_*.c
#
e('RULE_NO_PROPERTY',
title = "Rule prio=%d needs a '%s' property",
desc = BROKEN_CONFIG)
# cherokee/rule_request.c
#
e('RULE_REQUEST_NO_TABLE',
title = "Could not access to the RegEx table",
desc = CODING_BUG)
e('RULE_REQUEST_NO_PCRE_PTR',
title = "RegExp rule has null pcre",
desc = CODING_BUG)
# cherokee/rule_method.c
#
e('RULE_METHOD_UNKNOWN',
title = "Could not recognize HTTP method '%s'",
desc = "The rule found an entry with an unsupported HTTP method. Probably the configuration file has been tampered with.")
# cherokee/rule_header.c
#
e('RULE_HEADER_UNKNOWN_HEADER',
title = "Unknown header '%s'",
desc = "The rule found an entry with an unsupported header. Probably the configuration file has been tampered with.")
e('RULE_HEADER_UNKNOWN_TYPE',
title = "Unknown type '%s'",
desc = "The rule found an entry with an unsupported type. Probably the configuration file has been tampered with.")
# cherokee/rule_from.c
#
e('RULE_FROM_ENTRY',
title = "Could not parse 'from' entry: '%s'",
desc = "The entries of this rule must be either IP address or network masks. Both IPv4 and IPv6 addresses and masks are supported.")
# cherokee/rule_bind.c
#
e('RULE_BIND_PORT',
title = "Rule prio=%d type='bind', invalid port='%s'",
desc = BROKEN_CONFIG)
# cherokee/server.c
#
e('SERVER_GROUP_NOT_FOUND',
title = "Group '%s' not found in the system",
desc = "Seem like you've specified a wrong GID. Change the specified one or try to create it using the addgroup/groupadd command.",
admin = "/general#Permissions-3")
e('SERVER_USER_NOT_FOUND',
title = "User '%s' not found in the system",
desc = "Looks like you've specified a wrong UID. Either change the specified one or try to create it using the adduser/useradd command.",
admin = "/general#Permissions-3")
e('SERVER_THREAD_IGNORE',
title = "Ignoring thread_policy entry '%s'",
desc = "It looks like an error ocurred with the selected OS thread policy and it has been ignored. Once a valid one is selected, the issue will be fixed.",
admin = "/advanced#Resources-2")
e('SERVER_THREAD_POLICY',
title = "Unknown thread policy '%s'",
desc = "The specified OS thread policy is unknown. You should try re-selecting one.",
admin = "/advanced#Resources-2")
e('SERVER_TOKEN',
title = "Unknown server token '%s'",
desc = "An incorrect server token was specified. Please choose one that is available in you Network behavior settings.",
admin = "/general")
e('SERVER_POLLING_UNRECOGNIZED',
title = "Polling method '%s' has not been recognized",
desc = "An incorrect polling method was specified. Please try to fix that in your advanced settings.",
admin = "/advanced#Connections-1")
e('SERVER_POLLING_UNSUPPORTED',
title = "Polling method '%s' is not supported by this OS",
desc = "The specified polling method does not work on your platform. Please try to choose another one in your advanced settings.",
admin = "/advanced#Connections-1")
e('SERVER_POLLING_UNKNOWN',
title = "Unknown polling method '%s'",
desc = "An incorrect polling method was specified. Please try to fix that in your advanced settings.",
admin = "/advanced#Connections-1")
e('SERVER_NO_BIND',
title = "Not listening on any port.",
desc = "The web server needs to be associated to a TCP port. Please try to specify that in your general settings.",
admin = "/general#Ports_to_listen-2")
e('SERVER_IGNORE_TLS',
title = "Ignoring TLS port %d",
desc = "No TLS backend is specified, but the configuration specifies a secure port and it is being ignored. Either enable a TLS backend or disable the TLS checkbox for the specified port.",
admin = "/general#Network-1",
show_bt = False)
e('SERVER_TLS_DEFAULT',
title = "TLS/SSL support required for 'default' Virtual Server.",
desc = "TLS/SSL support must be set up in the 'default' Virtual Server. Its certificate will be used by the server in case TLS SNI information is not provided by the client.")
e('SERVER_NO_CRYPTOR',
title = "Virtual Server '%s' is trying to use SSL/TLS, but no Crypto engine is active.",
desc = "For a Virtual Server to use SSL/TLS, a Crypto engine must be available server-wide.")
e('SERVER_PARSE',
title = "Server parser: Unknown key '%s'",
desc = BROKEN_CONFIG)
e('SERVER_INITGROUPS',
title = "initgroups: Unable to set groups for user '%s' and GID %d",
desc = SYSTEM_ISSUE)
# Message fixes: both descriptions said "you the server"; the SETUID
# title said "change group to UID" where it reports a user-ID change
# (copy-paste from SERVER_SETGID).
e('SERVER_SETGID',
  title = "cannot change group to GID %d, running with GID=%d",
  desc = "Most probably the server did not have enough permissions to change its execution group.")
e('SERVER_SETUID',
  title = "cannot change user to UID %d, running with UID=%d",
  desc = "Most probably the server did not have enough permissions to change its execution user.")
e('SERVER_GET_FDLIMIT',
title = "Could not get File Descriptor limit",
desc = SYSTEM_ISSUE,
debug = "poll_type = %d")
e('SERVER_FDS_SYS_LIMIT',
title = "The server FD limit seems to be higher than the system limit",
desc = "The opened file descriptor limit of the server is %d, while the limit of the system is %d. This is an unlikely situation. You could try to raise the opened file descriptor limit of your system.")
e('SERVER_THREAD_POLL',
title = "The FD limit of the thread is greater than the limit of the poll",
desc = "It seems that an internal server thread assumed a file descriptor limit of %d. However, its FD poll has a lower limit of %d descriptors. The limit has been reduced to the poll limit.")
e('SERVER_NEW_THREAD',
title = "Could not create an internal server thread",
desc = "This is a extremely unusual error. For some reason the server could not create a thread while launching the server.",
debug = "ret = %d")
e('SERVER_TLS_INIT',
title = "cannot initialize TLS for '%s' virtual host",
desc = "This is usually caused by an error with a certificate or private key.")
e('SERVER_FD_SET',
title = "Unable to raise file descriptor limit to %d",
desc = SYSTEM_ISSUE,
show_bt = False)
e('SERVER_FD_GET',
title = "Unable to read the file descriptor limit of the system",
desc = SYSTEM_ISSUE)
e('SERVER_LOW_FD_LIMIT',
title = "The number of available file descriptors is too low",
desc = "The number of available file descriptors: %d, is too low. At least there should be %d available. Please, try to raise your system file descriptor limit.")
e('SERVER_UID_GET',
title = "Could not get information about the UID %d",
desc = SYSTEM_ISSUE)
e('SERVER_CHROOT',
title = "Could not chroot() to '%s': '${errno}'",
desc = SYSTEM_ISSUE)
e('SERVER_CHDIR',
title = "Could not chdir() to '%s': '${errno}'",
desc = SYSTEM_ISSUE)
e('SERVER_SOURCE',
title = "Invalid Source entry '%s'",
desc = BROKEN_CONFIG)
e('SERVER_SOURCE_TYPE',
title = "Source %d: An entry 'type' is required",
desc = BROKEN_CONFIG)
e('SERVER_SOURCE_TYPE_UNKNOWN',
title = "Source %d has an unknown type: '%s'",
desc = BROKEN_CONFIG)
e('SERVER_VSERVER_PRIO',
title = "Invalid Virtual Server entry '%s'",
desc = BROKEN_CONFIG)
e('SERVER_NO_VSERVERS',
title = "No virtual hosts have been configured",
desc = "There should exist at least one virtual server.")
e('SERVER_NO_DEFAULT_VSERVER',
title = "Lowest priority virtual server must be 'default'",
desc = "The lowest priority virtual server should be named 'default'.")
e('SERVER_FORK',
title = "Could not fork()",
desc = SYSTEM_ISSUE)
e('SERVER_PANIC',
title = "Could not execute the Panic handler: '%s', status %d",
desc = "Something happened with the server, and it felt panic. It tried to call an external program to report it to the administrator, but it failed.")
# cherokee/source_interpreter.c
#
e('SRC_INTER_NO_USER',
title = "User '%s' not found in the system",
desc = "The server is configured to execute an interpreter as a different user. However, it seems that the user does not exist in the system.",
admin = "/source/%d")
e('SRC_INTER_NO_GROUP',
title = "Group '%s' not found in the system",
desc = "The server is configured to execute an interpreter as a different group. However, it seems that the group does not exist in the system.",
admin = "/source/%d")
# Message fixes: "a 'Interpreter Source'" -> "an 'Interpreter Source'",
# "witout" -> "without".
e('SRC_INTER_EMPTY_INTERPRETER',
  title = "There is an 'Interpreter Source' without an interpreter.",
  desc = "The server configuration defines an 'interpreter' information source that does not specify an interpreter.",
  admin = "/source/%d")
e('SRC_INTER_NO_INTERPRETER',
title = "Could not find interpreter '%s'",
desc = "The server configuration refers to an interpreter that is not installed in this system.",
admin = "/source/%d")
e('SRC_INTER_ENV_IN_COMMAND',
title = "The command to launch the interpreter contains environment variables",
desc = "Please remove the environment variables from the command, and add them as such.",
admin = "/source/%d",
debug = "Command: %s")
e('SRC_INTER_SPAWN',
title = "Could not spawn '%s'",
desc = SYSTEM_ISSUE)
e('SRC_INTER_SETUID',
title = "Can't change setuid %d",
desc = SYSTEM_ISSUE)
e('SRC_INTER_SETGID',
title = "Can't change setgid %d",
desc = SYSTEM_ISSUE)
e('SRC_INTER_CHROOT',
title = "Could not chroot() to '%s'",
desc = SYSTEM_ISSUE)
# cherokee/config_reader.c
#
e('CONF_READ_ACCESS_FILE',
title = "Could not access file",
desc = "The configuration file '%s' could not be accessed. Most probably the server user does not have enough permissions to read it.",
show_bt = False)
e('CONF_READ_CHILDREN_SAME_NODE',
title = "'%s' and '%s' as child of the same node",
desc = CODING_BUG)
e('CONF_READ_PARSE',
title = "Parsing error",
desc = "The server could not parse the configuration. Something must be wrong with formation. At this stage the lexical is checked.",
debug = "%s")
# cherokee/flcache.c
#
e('FLCACHE_CHOWN',
title = "Could not chown the FLCache directory '%s' to user '%s' and group '%s'",
desc = SYSTEM_ISSUE)
e('FLCACHE_MKDIR',
title = "Could not create the '%s' directory, or it doesn't have %s permissions",
desc = SYSTEM_ISSUE)
# Fix user-facing typos: "temporal directy neither ... nor" -> "temporary directory either ... or".
e('FLCACHE_MKDIRS',
  title = "Could not create the FLCache temporary directory either under %s or under %s, or it doesn't have %s permissions",
  desc = SYSTEM_ISSUE)
e('FLCACHE_CREATE_FILE',
title = "Could not create the '%s' cache object file: ${errno}",
desc = SYSTEM_ISSUE)
# cherokee/template.c
#
e('TEMPLATE_NO_TOKEN',
title = "Token not found '%s'",
desc = "It seems that the template uses an undefined token.")
# cherokee/services-client.c
#
e('CLIENT_ALREADY_INIT',
title = "Could not initialise service client, already initialised.",
desc = CODING_BUG)
# cherokee/services-server.c
#
e('SERVER_ALREADY_INIT',
title = "Could not initialise service server, already initialised.",
desc = CODING_BUG)
e('SERVER_CANNOT_SOCKETPAIR',
title = "Could not create socket pair for service server: ${errno}",
desc = SYSTEM_ISSUE)
# cherokee/http.c
#
e('HTTP_UNKNOWN_CODE',
title = "Unknown HTTP status code %d")
# cherokee/icons.c
#
e('ICONS_NO_DEFAULT',
title = "A default icon is needed",
desc = "Please, specify a default icon. It is the icon that Cherokee will use whenever no other icon is used.",
admin = "/general#Icons-4")
e('ICONS_ASSIGN_SUFFIX',
title = "Could not assign suffix '%s' to file '%s'",
desc = UNKNOWN_CAUSE,
admin = "/general#Icons-4")
e('ICONS_DUP_SUFFIX',
title = "Duped suffix (case insensitive) '%s', pointing to '%s'",
desc = UNKNOWN_CAUSE,
admin = "/general#Icons-4")
# cherokee/header.c
#
e('HEADER_EMPTY',
title = "Calling cherokee_header_parse() with an empty header",
desc = CODING_BUG)
e('HEADER_NO_EOH',
title = "Could not find the End Of Header",
desc = CODING_BUG,
debug = "len=%d, buf=%s")
e('HEADER_TOO_MANY_CRLF',
title = "Too many initial CRLF",
desc = CODING_BUG)
e('HEADER_ADD_HEADER',
title = "Failed to store a header entry while parsing",
desc = CODING_BUG)
# cherokee/socket.c
#
e('SOCKET_NO_IPV6',
title = "IPv6 support is disabled. Configuring for IPv4 support only.",
desc = SYSTEM_ISSUE,
admin = "/general#Network-4",
show_bt = False)
e('SOCKET_NEW_SOCKET',
title = "Could not create socket: ${errno}",
desc = SYSTEM_ISSUE)
e('SOCKET_SET_LINGER',
title = "Could not set SO_LINGER on fd=%d: ${errno}",
desc = CODING_BUG)
e('SOCKET_RM_NAGLES',
title = "Could not disable Nagle's algorithm",
desc = SYSTEM_ISSUE)
e('SOCKET_NON_BLOCKING',
title = "Could not set non-blocking, fd %d",
desc = CODING_BUG)
e('SOCKET_NO_SOCKET',
title = "%s is not a socket",
desc = "The file is supposed to be a Unix socket, although it does not look like one.")
e('SOCKET_REMOVE',
title = "Could not remove %s",
desc = "Could not remove the Unix socket because: ${errno}")
e('SOCKET_WRITE',
title = "Could not write to socket: write(%d, ..): '${errno}'",
desc = CODING_BUG)
e('SOCKET_READ',
title = "Could not read from socket: read(%d, ..): '${errno}'",
desc = CODING_BUG)
e('SOCKET_WRITEV',
title = "Could not write a vector to socket: writev(%d, ..): '${errno}'",
desc = CODING_BUG)
e('SOCKET_CONNECT',
title = "Could not connect: ${errno}",
desc = SYSTEM_ISSUE)
e('SOCKET_BAD_FAMILY',
title = "Unknown socket family: %d",
desc = CODING_BUG)
e('SOCKET_SET_NODELAY',
title = "Could not set TCP_NODELAY to fd %d: ${errno}",
desc = CODING_BUG)
e('SOCKET_RM_NODELAY',
title = "Could not remove TCP_NODELAY from fd %d: ${errno}",
desc = CODING_BUG)
e('SOCKET_SET_CORK',
title = "Could not set TCP_CORK to fd %d: ${errno}",
desc = CODING_BUG)
e('SOCKET_RM_CORK',
title = "Could not set TCP_CORK from fd %d: ${errno}",
desc = CODING_BUG)
# cherokee/thread.c
#
e('THREAD_RM_FD_POLL',
title = "Could not remove fd(%d) from fdpoll",
desc = CODING_BUG)
e('THREAD_HANDLER_RET',
title = "Unknown ret %d from handler %s",
desc = CODING_BUG)
e('THREAD_OUT_OF_FDS',
title = "Run out of file descriptors",
desc = "The server is under heavy load and it has run out of file descriptors. It can be fixed by raising the file descriptor limit and restarting the server.",
admin = "/advanced")
e('THREAD_GET_CONN_OBJ',
title = "Trying to get a new connection object",
desc = "Either the system run out of memory, or you've hit a bug in the code.")
e('THREAD_SET_SOCKADDR',
title = "Could not set sockaddr",
desc = CODING_BUG)
e('THREAD_CREATE',
title = "Could not create a system thread: '${errno}'",
desc = "This is a extremely unusual error. For some reason your system could not create a thread while launching the server. You might have hit some system restriction.",
debug = "pthread_create() error = %d")
# cherokee/connection.c
#
e('CONNECTION_AUTH',
title = "Unknown authentication method",
desc = BROKEN_CONFIG)
e('CONNECTION_LOCAL_DIR',
title = "Could not build the local directory string",
desc = CODING_BUG)
e('CONNECTION_GET_VSERVER',
title = "Could not get virtual server: '%s'",
desc = CODING_BUG)
# cherokee/ncpus.c
#
e('NCPUS_PSTAT',
title = "pstat_getdynamic() failed: '${errno}'",
desc = SYSTEM_ISSUE)
e('NCPUS_HW_NCPU',
title = "sysctl(CTL_HW:HW_NCPU) failed: '${errno}'",
desc = SYSTEM_ISSUE)
e('NCPUS_SYSCONF',
title = "sysconf(_SC_NPROCESSORS_ONLN) failed: '${errno}'",
desc = SYSTEM_ISSUE)
# cherokee/init.c
#
e('INIT_CPU_NUMBER',
title = "Could not figure the CPU/core number of your server. Read %d, set to 1")
e('INIT_GET_FD_LIMIT',
title = "Could not get the file descriptor limit of your system",
desc = SYSTEM_ISSUE)
# cherokee/utils.c
#
e('UTIL_F_GETFL',
title = "fcntl (F_GETFL, fd=%d, 0): ${errno}",
desc = CODING_BUG)
e('UTIL_F_SETFL',
title = "fcntl (F_GETFL, fd=%d, flags=%d (+%s)): ${errno}",
desc = CODING_BUG)
e('UTIL_F_GETFD',
title = "fcntl (F_GETFD, fd=%d, 0): ${errno}",
desc = CODING_BUG)
e('UTIL_F_SETFD',
title = "fcntl (F_GETFD, fd=%d, flags=%d (+%s)): ${errno}",
desc = CODING_BUG)
e('UTIL_MKDIR',
title = "Could not mkdir '%s' (UID %d): ${errno}",
desc = "Most probably there you have to adjust some permissions.")
# cherokee/avl.c
#
e('AVL_PREVIOUS',
title = "AVL Tree inconsistency: Right child",
desc = CODING_BUG)
e('AVL_NEXT',
title = "AVL Tree inconsistency: Left child",
desc = CODING_BUG)
e('AVL_BALANCE',
title = "AVL Tree inconsistency: Balance",
desc = CODING_BUG)
# cherokee/buffer.c
#
e('BUFFER_NEG_ESTIMATION',
title = "Buffer: Bad memory estimation. The format '%s' estimated a negative length: %d.",
desc = CODING_BUG)
e('BUFFER_NO_SPACE',
title = "Buffer: No target memory. The format '%s' got a free size of %d (estimated %d).",
desc = CODING_BUG)
e('BUFFER_BAD_ESTIMATION',
title = "Buffer: Bad estimation. Too few memory: '%s' -> '%s', esti=%d real=%d size=%d.",
desc = CODING_BUG)
e('BUFFER_AVAIL_SIZE',
title = "Buffer: Bad estimation: Estimation=%d, needed=%d available size=%d: %s.",
desc = CODING_BUG)
e('BUFFER_OPEN_FILE',
title = "Could not open the file: %s, ${errno}",
desc = "Please check that the file exists and the server has read access.")
e('BUFFER_READ_FILE',
title = "Could not read from fd: read(%d, %d, ..) = ${errno}",
desc = "Please check that the file exists and the server has read access.")
# cherokee/plugin_loader.c
#
UNAVAILABLE_PLUGIN = """Either you are trying to use an unavailable
(uninstalled?) plugin, or there is a installation issue."""
# Fix user-facing typo: "simbol" -> "symbol".
e('PLUGIN_LOAD_NO_SYM',
  title = "Could not get symbol '%s': %s",
  desc = INTERNAL_ISSUE)
e('PLUGIN_DLOPEN',
title = "Something just happened while opening a plug-in file",
desc = "The operating system reported '%s' while trying to load '%s'.")
e('PLUGIN_NO_INIT',
title = "The plug-in initialization function (%s) could not be found",
desc = CODING_BUG)
e('PLUGIN_NO_OPEN',
title = "Could not open the '%s' module",
desc = UNAVAILABLE_PLUGIN)
e('PLUGIN_NO_INFO',
title = "Could not access the 'info' entry of the %s plug-in",
desc = UNAVAILABLE_PLUGIN)
# cherokee/virtual_server.c
#
e('VSERVER_BAD_METHOD',
title = "Unsupported method '%s'",
admin = "/vserver/%d/rule/%d",
desc = "For some reason the configuration file is trying to use an invalid authentication method. Either the file has been tampered with, or you are using a legacy configuration from a system that was compiled with support for more authentication methods.")
e('VSERVER_TIME_MISSING',
title = "Expiration time without a 'time' property",
admin = "/vserver/%d/rule/%d",
desc = "The expiration time feature is being used but no amount of time has been specified. Either provide on or disable Expiration.")
e('VSERVER_RULE_UNKNOWN_KEY',
title = "Virtual Server Rule, Unknown key '%s'",
admin = "/vserver/%d/rule/%d",
desc = "Most probably you are using an old configuration file that contains a deprecated key. Loading and then saving it through Cherokee-Admin should update the old entries for you automatically.")
e('VSERVER_TYPE_MISSING',
title = "Rule matches must specify a 'type' property",
admin = "/vserver/%d/rule/%d",
desc = "For some reason the rule is incomplete. Try editing or recreating it within Cherokee-Admin.")
e('VSERVER_LOAD_MODULE',
title = "Could not load rule module '%s'",
admin = "/vserver/%d",
desc = "The server could not load a plug-in file. This might be due to some problem in the installation.")
e('VSERVER_BAD_PRIORITY',
title = "Invalid priority '%s'",
admin = "/vserver/%d",
desc = "For some reason your configuration file contains invalid priority values, which must be an integer higher than 0. Most likely it has been edited by hand and the value must be fixed manually or the rule has to be discarded.")
e('VSERVER_RULE_MATCH_MISSING',
title = "Rules must specify a 'match' property",
admin = "/vserver/%d/rule/%d",
desc = "For some reason there is an incomplete rule in your configuration file. Try locating it in Cherokee-Admin and fill in all the mandatory fields.")
e('VSERVER_MATCH_MISSING',
title = "Virtual Server must specify a 'match' property",
admin = "/vserver/%d#Host_Match-2",
desc = "Try filling in the fields under the 'Host Match' tab.")
e('VSERVER_UNKNOWN_KEY',
title = "Virtual Server, Unknown key '%s'",
admin = "/vserver/%d",
desc = "Most probably you are using an old configuration file that contains a deprecated key. Loading and then saving it through Cherokee-Admin should update the old entries for you automatically.")
e('VSERVER_NICK_MISSING',
title = "Virtual Server without a 'nick' property",
admin = "/vserver/%d#Basics-1",
desc = "For some reason, a mandatory property is not present in your configuration. Fill in the 'Virtual Server nickname' field, under the 'Basics' tab.")
e('VSERVER_DROOT_MISSING',
title = "Virtual Server without a 'document_root' property",
admin = "/vserver/%d#Basics-1",
desc = "You seem to have forgotten to provide a valid Document Root. This is the root path that contains the files and directories that will be made publicly available through the web server. It can be an empty path and even /dev/null, but it is a mandatory property.")
e('VSERVER_FLCACHE_UNKNOWN_POLICY',
title = "Unknown Front-Line Cache caching policy: %s",
admin = "/vserver/%d/rule/%d",
desc = BROKEN_CONFIG)
# cherokee/regex.c
#
e('REGEX_COMPILATION',
title = "Could not compile <<%s>>: %s (offset=%d)",
desc = "For some reason, PCRE could not compile the regular expression. Please modify the regular expression in order to solve this problem.")
# cherokee/access.c
#
e('ACCESS_IPV4_MAPPED',
title = "This IP '%s' is IPv6-mapped IPv6 address",
desc = "It can be solved by specifying the IP in IPv4 style: a.b.c.d, instead of IPv6 style: ::ffff:a.b.c.d style")
e('ACCESS_INVALID_IP',
title = "The IP address '%s' seems to be invalid",
desc = "You must have made a mistake. Please, try to fix the IP and try again.")
e('ACCESS_INVALID_MASK',
title = "The network mask '%s' seems to be invalid",
desc = "You must have made a mistake. Please, try to fix the IP and try again.")
# cherokee/bind.c
#
# Fix user-facing grammar: "A port entry is need" -> "needed".
e('BIND_PORT_NEEDED',
  title = "A port entry is needed",
  desc = "It seems that the configuration file includes a port listening entry with the wrong format. It should contain one port specification, but it does not in this case.",
  admin = "/general#Ports_to_listen-2")
e('BIND_COULDNT_BIND_PORT',
title = "Could not bind() port=%d (UID=%d, GID=%d)",
desc = "Most probably there is another web server listening to the same port. You will have to shut it down before launching Cherokee. It could also be a permissions issue as well. Remember that non-root user cannot listen to ports < 1024.",
admin = "/general#Ports_to_listen-2")
# cherokee/handler_rrd.c
#
e('HANDLER_RENDER_RRD_EXEC',
title = "Could not execute RRD command: %s",
desc = SYSTEM_ISSUE)
e('HANDLER_RENDER_RRD_EMPTY_REPLY',
title = "RRDtool empty response",
desc = SYSTEM_ISSUE)
e('HANDLER_RENDER_RRD_MSG',
title = "RRDtool replied an error message: %s",
desc = SYSTEM_ISSUE)
e('HANDLER_RENDER_RRD_INVALID_REQ',
title = "Invalid request: %s",
desc = SYSTEM_ISSUE)
# cherokee/collector_rrd.c
#
e('COLLECTOR_COMMAND_EXEC',
title = "Could not execute RRD command: %s",
desc = SYSTEM_ISSUE,
admin = "/general#Network-1")
e('COLLECTOR_NEW_THREAD',
title = "Could not create the RRD working thread: error=%d",
desc = SYSTEM_ISSUE)
e('COLLECTOR_NEW_MUTEX',
title = "Could not create the RRD working mutex: error=%d",
desc = SYSTEM_ISSUE)
# cherokee/validator_mysql.c
#
e('VALIDATOR_MYSQL_HASH',
title = "Validator MySQL: Unknown hash type: '%s'",
desc = CODING_BUG)
e('VALIDATOR_MYSQL_KEY',
title = "Validator MySQL: Unknown key: '%s'",
desc = CODING_BUG)
e('VALIDATOR_MYSQL_USER',
title = "MySQL validator: a 'user' entry is needed",
desc = "Make sure that a valid MySQL user-name has been provided.")
e('VALIDATOR_MYSQL_DATABASE',
title = "MySQL validator: a 'database' entry is needed",
desc = "Make sure that a valid MySQL database-name has been provided.")
e('VALIDATOR_MYSQL_QUERY',
title = "MySQL validator: a 'query' entry is needed",
desc = "Make sure that a MySQL query has been provided.")
e('VALIDATOR_MYSQL_SOURCE',
title = "MySQL validator misconfigured: A Host or Unix socket is needed.",
desc = "Make sure that a working database host is specified for MySQL validation.")
# Fix user-facing typo: "connetion" -> "connection".
e('VALIDATOR_MYSQL_NOCONN',
  title = "Unable to connect to MySQL server: %s:%d %s",
  desc = "Most probably the MySQL server is down or you've mistyped a connection parameter")
# cherokee/error_log.c
#
e('ERRORLOG_PARAM',
title = "Unknown parameter type '%c'",
desc = "Accepted parameter are 's' and 'd'")
# cherokee/cryptor_libssl.c
#
e('SSL_NO_ENTROPY',
title = "Not enough entropy in the pool",
desc = SYSTEM_ISSUE)
e('SSL_SOCKET',
title = "Could not get the socket struct: %p",
desc = SYSTEM_ISSUE)
e('SSL_SRV_MATCH',
title = "Servername did not match: '%s'",
desc = "A TLS negotiation using SNI is sending a domain name that does not match any of the available ones. This makes it impossible to present a certificate with a correct CA. Check the list of TLS enabled Virtual Servers if you expect otherwise.")
e('SSL_CHANGE_CTX',
title = "Could not change the SSL context: servername='%s'",
desc = SYSTEM_ISSUE)
e('SSL_ALLOCATE_CTX',
title = "OpenSSL: Could not allocate OpenSSL context",
desc = SYSTEM_ISSUE)
e('SSL_CIPHER',
title = "OpenSSL: cannot set cipher list '%s': %s",
desc = SYSTEM_ISSUE)
# Fix user-facing typos: "occured" -> "occurred" (twice) and the dropped
# preposition in SSL_KEY ("private key the SSL context" -> "into the").
e('SSL_CERTIFICATE',
  title = "OpenSSL: cannot use certificate file '%s': %s",
  desc = "An error occurred while trying to load a certificate into the SSL context structure. Most likely the certificate file is wrong or has been corrupted.")
e('SSL_KEY',
  title = "OpenSSL: cannot use private key file '%s': %s",
  desc = "An error occurred while trying to load a private key into the SSL context structure. Most likely the file is wrong or has been corrupted.")
e('SSL_KEY_MATCH',
title = "OpenSSL: Private key does not match the certificate public key",
desc = "The private key must agree with the corresponding public key in the certificate associated with a specific SSL context. Double check both private key and certificate.")
e('SSL_CA_READ',
title = "OpenSSL: cannot read trusted CA list '%s': %s",
desc = "If this happens, CA certificates for verification purposes cannot be located. It is likely there is a problem with your private key.")
e('SSL_CA_LOAD',
title = "SSL_load_client_CA_file '%s': %s",
desc = "A file of PEM formatted certificates should be read to extract data of the certificates found. It is likely there is a problem with your private key.")
e('SSL_SESSION_ID',
title = "Unable to set SSL session-id context for '%s': %s",
desc = SYSTEM_ISSUE)
e('SSL_SNI',
title = "Could not activate TLS SNI for '%s': %s",
desc = "It looks like Cherokee was compiled with TLS SNI support. However, it is currently using a SSL library (libssl/openssl) without TLS SNI support, and thus SNI is disabled.")
e('SSL_CONNECTION',
title = "OpenSSL: Unable to create a new SSL connection from the SSL context: %s",
desc = SYSTEM_ISSUE)
e('SSL_FD',
title = "OpenSSL: cannot set fd(%d): %s",
desc = SYSTEM_ISSUE)
e('SSL_INIT',
title = "Init OpenSSL: %s",
desc = SYSTEM_ISSUE)
e('SSL_SW_DEFAULT',
title = "SSL_write: unknown errno: ${errno}",
desc = SYSTEM_ISSUE)
e('SSL_SW_ERROR',
title = "SSL_write (%d, ..) -> err=%d '%s'",
desc = SYSTEM_ISSUE)
e('SSL_SR_DEFAULT',
title = "SSL_read: unknown errno: ${errno}",
desc = SYSTEM_ISSUE)
e('SSL_SR_ERROR',
title = "OpenSSL: SSL_read (%d, ..) -> err=%d '%s'",
desc = SYSTEM_ISSUE)
e('SSL_CREATE_CTX',
title = "OpenSSL: Unable to create a new SSL context: %s",
desc = SYSTEM_ISSUE)
e('SSL_CTX_LOAD',
title = "OpenSSL: '%s': %s",
desc = SYSTEM_ISSUE)
e('SSL_CTX_SET',
title = "OpenSSL: cannot set certificate verification paths: %s",
desc = SYSTEM_ISSUE)
e('SSL_SNI_SRV',
title = "OpenSSL: Could not set SNI server name: %s",
desc = SYSTEM_ISSUE)
e('SSL_CONNECT',
title = "OpenSSL: cannot connect: %s",
desc = SYSTEM_ISSUE)
e('SSL_PKCS11',
title = "Could not init pkcs11 engine",
desc = SYSTEM_ISSUE)
e('SSL_DEFAULTS',
title = "Could not set all defaults",
desc = SYSTEM_ISSUE)
| 0.036192 |
import numpy as np
# numpy.isclose appeared in numpy 1.7; on older installations expose a stub
# that fails loudly when called instead of crashing at import time.
if hasattr(np, 'isclose'):
    isclose = np.isclose
else:
    def isclose(*args, **kwargs):
        raise RuntimeError("You need numpy version 1.7 or greater to use "
                           "isclose.")
# numpy.full appeared in numpy 1.8; emulate it with multiply/ones otherwise.
if hasattr(np, 'full'):
    full = np.full
else:
    def full(shape, fill_value, dtype=None, order=None):
        """Our implementation of numpy.full because your numpy is old."""
        if order is not None:
            raise NotImplementedError("`order` kwarg is not supported upgrade "
                                      "to Numpy 1.8 or greater for support.")
        return np.multiply(fill_value, np.ones(shape, dtype=dtype),
                           dtype=dtype)
# Taken from scikit-learn:
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/fixes.py#L84
# Probe whether numpy's divide honours the `dtype` keyword correctly
# (see https://github.com/numpy/numpy/issues/3484).
# Fix: use the builtin `float` instead of the deprecated `np.float` alias,
# which was removed in numpy 1.20 and would raise AttributeError here
# (escaping the `except TypeError` below and breaking the import).
try:
    if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
                        np.divide(.4, 1, casting="unsafe", dtype=float))
            or not np.allclose(np.divide(.4, 1), .4)):
        raise TypeError('Divide not working with dtype: '
                        'https://github.com/numpy/numpy/issues/3484')
    divide = np.divide
except TypeError:
    # Divide with dtype doesn't work on Python 3
    def divide(x1, x2, out=None, dtype=None):
        """Implementation of numpy.divide that works with dtype kwarg.

        Temporary compatibility fix for a bug in numpy's version. See
        https://github.com/numpy/numpy/issues/3484 for the relevant issue."""
        x = np.divide(x1, x2, out)
        if dtype is not None:
            x = x.astype(dtype)
        return x
| 0 |
# -*- coding: utf-8 -*-
import sys
from ctypes import *
from ctypes.util import find_library
from os.path import join, dirname, abspath, exists
import os
# Locate the native mmseg shared library: prefer one found on the system
# search path; otherwise fall back to a copy bundled next to this module
# (.dll on Windows, a 64-bit .so elsewhere).
if find_library('mmseg'):
    mmseg = CDLL(find_library('mmseg'))
else:
    if sys.platform == 'win32':
        mmseg = CDLL(os.path.join(os.path.dirname(__file__),"./mmseg.dll"))
    else:
        mmseg = CDLL(os.path.join(os.path.dirname(__file__),"./mmseg.so.64"))
##mmseg = cdll.LoadLibrary(ext)
#mmseg = CDLL(ext)
########################################
# the Token struct
########################################
class Token(Structure):
    """One token produced by the segmentor.

    Mirrors the C ``Token`` struct: a pointer into the original text buffer
    plus the token's byte offset and length.  The ``text``/``start``/``end``/
    ``length`` attributes are read-only views over those raw fields; any
    assignment raises AttributeError.
    """
    _fields_ = [('_text', c_void_p),
                ('_offset', c_int),
                ('_length', c_int)]

    def text_get(self):
        # Copy `_length` bytes out of the C buffer.
        return string_at(self._text, self._length)
    def text_set(self, value):
        raise AttributeError('text attribute is read only')
    text = property(text_get, text_set)

    def start_get(self):
        return self._offset
    def start_set(self, value):
        raise AttributeError('start attribute is read only')
    start = property(start_get, start_set)

    def end_get(self):
        return self._offset+self._length
    def end_set(self, value):
        # Fixed: the message previously (wrongly) named the 'start' attribute.
        raise AttributeError('end attribute is read only')
    end = property(end_get, end_set)

    def length_get(self):
        return self._length
    def length_set(self, value):
        # Fixed: the setter was missing the `value` parameter, so assigning
        # to .length raised TypeError instead of the intended AttributeError.
        raise AttributeError('length attribute is read only')
    length = property(length_get, length_set)

    def __repr__(self):
        return '<Token %d..%d %s>' % (self.start,
                                      self.end, self.text.__repr__())
    def __str__(self):
        return self.text
########################################
# Init function prototypes
########################################
# Declare argument/result types for the C entry points so ctypes marshals
# them correctly (notably the Token struct returned by value).
mmseg.mmseg_load_chars.argtypes = [c_char_p]
mmseg.mmseg_load_chars.restype = c_int
mmseg.mmseg_load_words.argtypes = [c_char_p]
mmseg.mmseg_load_words.restype = c_int
mmseg.mmseg_dic_add.argtypes = [c_char_p, c_int, c_int]
mmseg.mmseg_dic_add.restype = None
mmseg.mmseg_algor_create.argtypes = [c_char_p, c_int]
mmseg.mmseg_algor_create.restype = c_void_p
mmseg.mmseg_algor_destroy.argtypes = [c_void_p]
mmseg.mmseg_algor_destroy.restype = None
mmseg.mmseg_next_token.argtypes = [c_void_p]
mmseg.mmseg_next_token.restype = Token
########################################
# Python API
########################################
def dict_load_chars(path):
    """Load the single-character dictionary at `path`; True on success."""
    return mmseg.mmseg_load_chars(path) != 0
def dict_load_words(path):
    """Load the word dictionary at `path`; True on success."""
    return mmseg.mmseg_load_words(path) != 0
def dict_load_defaults():
    """Load the chars/words dictionaries shipped alongside this module."""
    here = dirname(__file__)
    mmseg.mmseg_load_chars(join(here, '.', 'chars.dic'))
    mmseg.mmseg_load_words(join(here, '.', 'words.dic'))
class Algorithm(object):
    """Iterator-style wrapper over the C segmentation algorithm.

    Iteration has side effects in the underlying C object, so an Algorithm
    instance can only be walked through once.
    """
    def __init__(self, text):
        """Create an Algorithm instance to segment `text`."""
        # Keep a reference so the string buffer the C side points into is
        # not garbage-collected while segmentation is in progress.
        self.text = text
        self.algor = mmseg.mmseg_algor_create(text, len(text))
        self.destroied = False   # (sic) attribute name kept for compatibility

    def __iter__(self):
        """Yield all remaining tokens.  One-shot: see class docstring."""
        while True:
            tk = self.next_token()
            if tk is None:
                # Fixed for PEP 479: `raise StopIteration` inside a generator
                # becomes RuntimeError on Python 3.7+; `return` ends the
                # generator correctly on every Python version.
                return
            yield tk

    def next_token(self):
        """Return the next Token, or None when no token is available."""
        if self.destroied:
            return None
        tk = mmseg.mmseg_next_token(self.algor)
        if tk.length == 0:
            # A zero-length token marks the end of input: the C algorithm
            # object can be released immediately.
            self._destroy()
            return None
        return tk

    def _destroy(self):
        # Idempotent release of the underlying C algorithm object.
        if not self.destroied:
            mmseg.mmseg_algor_destroy(self.algor)
            self.destroied = True

    def __del__(self):
        self._destroy()
| 0.007733 |
import sys
import datetime
from django.core.management.base import BaseCommand
from ietf.community.models import Rule, CommunityList
class Command(BaseCommand):
    """Re-save stale community-list rules and clear cached list renderings."""
    help = (u"Update drafts in community lists by reviewing their rules")

    def handle(self, *args, **options):
        # Rules untouched for over an hour get re-saved, which re-evaluates
        # them against the current set of drafts.
        cutoff = datetime.datetime.now() - datetime.timedelta(hours=1)
        stale_rules = Rule.objects.filter(last_updated__lt=cutoff)
        total = stale_rules.count()
        done = 0
        for position, rule in enumerate(stale_rules, 1):
            sys.stdout.write('Updating rule [%s/%s]\r' % (position, total))
            sys.stdout.flush()
            rule.save()
            done = position
        if done:
            print

        # Any community list holding a cached rendering is invalidated.
        cached_lists = CommunityList.objects.filter(cached__isnull=False)
        total = cached_lists.count()
        done = 0
        for position, cl in enumerate(cached_lists, 1):
            sys.stdout.write('Clearing community list cache [%s/%s]\r' % (position, total))
            sys.stdout.flush()
            cl.cached = None
            cl.save()
            done = position
        if done:
            print
| 0.00381 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Standard library
import os
import random
import re
import socket
import sys
import urllib
import urllib2

# Kodi/XBMC API (xbmc was used throughout this file but never imported)
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
# The addon ID is hard-coded instead of queried from the running addon
# (see the commented-out alternative below).
#addon = xbmcaddon.Addon()
#addonID = addon.getAddonInfo('id')
addonID = 'plugin.video.rtl_now'
addon = xbmcaddon.Addon(id=addonID)
# Abort hung HTTP connections after 30 seconds.
socket.setdefaulttimeout(30)
# Handle of the directory listing this plugin invocation must fill.
pluginhandle = int(sys.argv[1])
# NOTE(review): `xbmc` is used from here on but does not appear in this
# file's import block -- confirm `import xbmc` exists at the top.
xbox = xbmc.getCondVisibility("System.Platform.xbox")
addonDir = xbmc.translatePath(addon.getAddonInfo('path'))
addonUserDataFolder = xbmc.translatePath(addon.getAddonInfo('profile'))
# Per-user favourites live in a flat text file inside the addon profile.
channelFavsFile = os.path.join(addonUserDataFolder, addonID+".favorites")
# Channel icons shipped with the addon.
iconRTL = os.path.join(addonDir, 'iconRTL.png')
iconRTL2 = os.path.join(addonDir, 'iconRTL2.png')
iconVOX = os.path.join(addonDir, 'iconVOX.png')
iconRTLNitro = os.path.join(addonDir, 'iconRTLNitro.png')
iconSuperRTL = os.path.join(addonDir, 'iconSuperRTL.png')
iconNTV = os.path.join(addonDir, 'iconNTV.png')
# All HTTP traffic goes through one opener with a desktop-browser UA string.
opener = urllib2.build_opener()
userAgent = "Mozilla/5.0 (Windows NT 5.1; rv:24.0) Gecko/20100101 Firefox/24.0"
opener.addheaders = [('User-Agent', userAgent)]
# User-configurable settings.
useNoThumbMode = addon.getSetting("useNoThumbMode") == "true"
useThumbAsFanart = addon.getSetting("useThumbAsFanart") == "true"
forceViewMode = addon.getSetting("forceView") == "true"
viewMode = str(addon.getSetting("viewID"))
# One enable/disable flag per supported broadcaster site.
site1 = addon.getSetting("site1") == "true"
site2 = addon.getSetting("site2") == "true"
site3 = addon.getSetting("site3") == "true"
site4 = addon.getSetting("site4") == "true"
site5 = addon.getSetting("site5") == "true"
site6 = addon.getSetting("site6") == "true"
# Base URLs of the six "now" catch-up portals.
urlMainRTL = "http://rtl-now.rtl.de"
urlMainRTL2 = "http://rtl2now.rtl2.de"
urlMainVOX = "http://www.voxnow.de"
urlMainRTLNitro = "http://www.rtlnitronow.de"
urlMainSuperRTL = "http://www.superrtlnow.de"
urlMainNTV = "http://www.n-tvnow.de"
# Make sure the profile folder exists before the favourites file is used.
if not os.path.isdir(addonUserDataFolder):
    os.mkdir(addonUserDataFolder)
def index():
    # Root menu: the favourites folder first, then one entry per enabled
    # broadcaster site (site1..site6 user settings).
    addDir(translation(30023), "", 'listShowsFavs', '', '')
    if site1:
        addDir(translation(30002), urlMainRTL, "listChannel", iconRTL)
    if site2:
        addDir(translation(30003), urlMainRTL2, "listChannel", iconRTL2)
    if site3:
        addDir(translation(30004), urlMainVOX, "listChannel", iconVOX)
    if site4:
        addDir(translation(30005), urlMainRTLNitro, "listChannel", iconRTLNitro)
    if site5:
        addDir(translation(30006), urlMainSuperRTL, "listChannel", iconSuperRTL)
    if site6:
        addDir(translation(30007), urlMainNTV, "listChannel", iconNTV)
    xbmcplugin.endOfDirectory(pluginhandle)
def listChannel(urlMain, thumb):
    # Per-channel menu.  The show-listing entry depends on which portal this
    # is (and on the no-thumbnails setting); the four video lists below are
    # common to every portal.
    if urlMain == urlMainRTL and not useNoThumbMode:
        addDir(translation(30016), urlMain+"/newsuebersicht.php", "listShowsThumb", thumb)
        addDir(translation(30015), urlMain+"/sendung_a_z.php", "listShowsThumb", thumb)
    elif urlMain in [urlMainVOX, urlMainNTV, urlMainRTLNitro] and not useNoThumbMode:
        addDir(translation(30014), urlMain+"/sendung_a_z.php", "listShowsThumb", thumb)
    else:
        addDir(translation(30014), urlMain, "listShowsNoThumb", thumb)
    addDir(translation(30018), urlMain, "listVideosNew", thumb, "", "newlist")
    addDir(translation(30017), urlMain, "listVideosNew", thumb, "", "tipplist")
    addDir(translation(30019), urlMain, "listVideosNew", thumb, "", "top10list")
    addDir(translation(30020), urlMain, "listVideosNew", thumb, "", "topfloplist")
    xbmcplugin.endOfDirectory(pluginhandle)
    if forceViewMode:
        xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listShowsThumb(urlMain):
    # Scrape the thumbnail-style A-Z page: one 'm03medium' block per show.
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
    content = opener.open(urlMain).read()
    spl = content.split('<div class="m03medium"')
    entries = []
    for i in range(1, len(spl), 1):
        entry = spl[i]
        match = re.compile('<h.>(.+?)</h.>', re.DOTALL).findall(entry)
        title = cleanTitle(match[0])
        match = re.compile('href="(.+?)"', re.DOTALL).findall(entry)
        url = match[0]
        if not url.startswith("http"):
            # Relative link: keep only the first path segment and turn it
            # into a .php page under the portal's base URL.
            if url.startswith("/"):
                url = url[1:]
            if "/" in url:
                url = url[:url.find("/")]+".php"
            url = urlMain[:urlMain.rfind("/")+1]+url
        match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
        # Request the larger variant of the preview image.
        thumb = match[0].replace("/216x122/", "/864x488/")
        if url not in entries:
            addShowDir(title, url, 'listVideos', thumb)
            entries.append(url)
    xbmcplugin.endOfDirectory(pluginhandle)
    if forceViewMode:
        xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listShowsNoThumb(urlMain, thumb):
    # Scrape the plain navigation sidebar ('seriennavi' blocks); only shows
    # flagged FREE or NEW are listed.  The shared channel icon is reused as
    # thumbnail for every entry.
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
    content = opener.open(urlMain).read()
    spl = content.split('<div class="seriennavi')
    entries = []
    for i in range(1, len(spl), 1):
        entry = spl[i]
        match = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
        if match:
            title = cleanTitle(match[0]).replace(" online ansehen", "")
            match = re.compile('href="(.+?)"', re.DOTALL).findall(entry)
            if match and match[0].startswith("/"):
                url = urlMain+match[0]
                if '>FREE<' in entry or '>NEW<' in entry:
                    if url not in entries:
                        addShowDir(title, url, 'listVideos', thumb)
                        entries.append(url)
    xbmcplugin.endOfDirectory(pluginhandle)
    if forceViewMode:
        xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listShowsFavs():
    """List the shows the user saved as favourites.

    Each line of the favourites file packs three marker-delimited fields:
    ###TITLE###=..#, ###URL###=..# and ###THUMB###=..#.
    """
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
    if os.path.exists(channelFavsFile):
        # `with` guarantees the handle is closed even if parsing or
        # addShowRDir raises (the original leaked the handle on error).
        with open(channelFavsFile, 'r') as fh:
            for line in fh.readlines():
                title = line[line.find("###TITLE###=")+12:]
                title = title[:title.find("#")]
                url = line[line.find("###URL###=")+10:]
                url = url[:url.find("#")]
                thumb = line[line.find("###THUMB###=")+12:]
                thumb = thumb[:thumb.find("#")]
                addShowRDir(title, urllib.unquote_plus(url), "listVideos", thumb)
    xbmcplugin.endOfDirectory(pluginhandle)
    if forceViewMode:
        xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listSeasons(urlMain, thumb):
    # A show page drives its season tabs through xajax; extract the ajax
    # endpoint plus the per-season call arguments and turn each season into
    # a directory entry that listVideos can fetch via POST.
    content = opener.open(urlMain).read()
    matchUrl = re.compile('xajaxRequestUri="(.+?)"', re.DOTALL).findall(content)
    ajaxUrl = matchUrl[0]
    match = re.compile("onclick=\"currentreiter=.*?;show_top_and_movies_wrapper\\((.+?),'(.+?)','(.+?)',(.+?),(.+?),(.+?),'','(.+?)', '', '(.+?)'\\);.*?<div class=\"m\">(.+?)</div>", re.DOTALL).findall(content)
    for id1, id2, id3, id4, id5, id6, id7, id8, title in match:
        # Rebuild the xajax POST body from the scraped wrapper arguments.
        args = "xajax=show_top_and_movies&xajaxr=&xajaxargs[]="+id1+"&xajaxargs[]="+id2+"&xajaxargs[]="+id3+"&xajaxargs[]="+id4+"&xajaxargs[]="+id5+"&xajaxargs[]="+id6+"&xajaxargs[]="+id7+"&xajaxargs[]="+id8
        addDir(title, ajaxUrl, 'listVideos', thumb, args)
    xbmcplugin.endOfDirectory(pluginhandle)
    if forceViewMode:
        xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listVideos(urlMain, thumb, args=""):
    # List the free episodes of one show.  First call: scrape the show page
    # to find the xajax endpoint and build the POST body.  Follow-up calls
    # (paging) arrive with `args` pre-built and POST straight to `urlMain`.
    ajaxUrl = ""
    if not args:
        content = opener.open(urlMain).read()
        match = re.compile('<meta property="og:image" content="(.+?)"', re.DOTALL).findall(content)
        # Replace a generic channel icon with the show's own og:image.
        if match and thumb.split(os.sep)[-1].startswith("icon"):
            thumb = match[0]
        matchUrl = re.compile('xajaxRequestUri="(.+?)"', re.DOTALL).findall(content)
        ajaxUrl = matchUrl[0]
        matchParams = re.compile("<select onchange=\"xajax_show_top_and_movies.+?'(.+?)','(.+?)','(.+?)','(.+?)','(.+?)','(.+?)','(.+?)'", re.DOTALL).findall(content)
        if matchParams:
            args = "xajax=show_top_and_movies&xajaxr=&xajaxargs[]=0&xajaxargs[]="+matchParams[0][0]+"&xajaxargs[]="+matchParams[0][1]+"&xajaxargs[]="+matchParams[0][2]+"&xajaxargs[]="+matchParams[0][3]+"&xajaxargs[]="+matchParams[0][4]+"&xajaxargs[]="+matchParams[0][5]+"&xajaxargs[]="+matchParams[0][6]
        # POST the assembled args to the ajax endpoint for the episode list.
        content = opener.open(ajaxUrl, args).read()
    else:
        content = opener.open(urlMain, args).read()
    spl = content.split('<div class="line')
    count = 0
    for i in range(1, len(spl), 1):
        entry = spl[i]
        # Only free ("kostenlos") episodes are playable without login.
        if 'class="minibutton">kostenlos<' in entry:
            match = re.compile('title=".+?">(.+?)<', re.DOTALL).findall(entry)
            if match:
                title = cleanTitle(match[0])
                match = re.compile('class="time"><div style=".+?">.+?</div>(.+?)<', re.DOTALL).findall(entry)
                date = ""
                if match:
                    # Keep only the day.month part of the air date and
                    # prefix it to the title.
                    date = match[0].strip()
                    if " " in date:
                        date = date.split(" ")[0]
                    if "." in date:
                        date = date[:date.rfind(".")]
                    title = date+" - "+title
                match = re.compile('href="(.+?)"', re.DOTALL).findall(entry)
                url = urlMain[:urlMain.rfind("/")]+match[0].replace("&amp;", "&")
                addLink(title, url, 'playVideo', thumb)
                count+=1
    # A full page of 20 entries plus pager parameters means there is a next
    # page; add a "next" folder that re-enters this function with the args.
    matchParams = re.compile("<a class=\"sel\" >.+?xajax_show_top_and_movies\\((.+?),'(.+?)','(.+?)','(.+?)','(.+?)','(.+?)','(.+?)','(.+?)'", re.DOTALL).findall(content)
    if matchParams and count==20:
        args = "xajax=show_top_and_movies&xajaxr=&xajaxargs[]="+matchParams[0][0]+"&xajaxargs[]="+matchParams[0][1]+"&xajaxargs[]="+matchParams[0][2]+"&xajaxargs[]="+matchParams[0][3]+"&xajaxargs[]="+matchParams[0][4]+"&xajaxargs[]="+matchParams[0][5]+"&xajaxargs[]="+matchParams[0][6]+"&xajaxargs[]="+matchParams[0][7]
        if ajaxUrl:
            ajaxUrlNext = ajaxUrl
        else:
            ajaxUrlNext = urlMain
        addDir(translation(30001), ajaxUrlNext, "listVideos", thumb, args)
    xbmcplugin.endOfDirectory(pluginhandle)
    if forceViewMode:
        xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listVideosNew(urlMain, type):
    # Scrape a listing page and add one playable entry per free video.
    # urlMain: base page URL; type: id of the <div> section to scrape
    # ("tipplist" uses a different row markup than the top10 sections).
    # NOTE: 'type' shadows the builtin; kept for caller compatibility.
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
    content = opener.open(urlMain).read()
    # Cut everything before the requested section.
    content = content[content.find('<div id="'+type+'"'):]
    if type == "tipplist":
        # The NTV variant closes the section with a slightly different marker.
        if urlMain == urlMainNTV:
            content = content[:content.find("iv class=\"contentrow contentrow3\"><div class='contentrow_headline'")]
        else:
            content = content[:content.find("<div class='contentrow_headline'")]
        spl = content.split('<div class="m03medium"')
    else:
        content = content[:content.find('<div class="roundfooter"></div>')]
        spl = content.split('<div class="top10 ')
    # spl[0] is the section header; each following chunk is one video row.
    for i in range(1, len(spl), 1):
        entry = spl[i]
        # Title comes from a heading tag if present, else the image alt text.
        match1 = re.compile('<h.>(.+?)</h.>', re.DOTALL).findall(entry)
        match2 = re.compile('alt="(.+?)"', re.DOTALL).findall(entry)
        if match1:
            title = cleanTitle(match1[0])
        elif match2:
            title = cleanTitle(match2[0])
        title = title.replace("<br>", ": ")
        match = re.compile('href="(.+?)"', re.DOTALL).findall(entry)
        url = match[0].replace("&", "&")
        # Relative links need the page URL prepended.
        if not urlMain in url:
            url = urlMain+url
        # Request the high-resolution variant of the thumbnail.
        match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
        thumb = match[0].replace("/216x122/", "/864x488/")
        # Only list videos flagged as free.
        if 'class="m03date">FREE' in entry or 'FREE |' in entry:
            addLink(title, url, 'playVideo', thumb)
    xbmcplugin.endOfDirectory(pluginhandle)
    if forceViewMode:
        xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def playVideo(urlMain):
    # Resolve the video page at urlMain to a playable rtmpe stream URL and
    # hand it to XBMC via setResolvedUrl.
    content = opener.open(urlMain).read()
    if "<div>DAS TUT UNS LEID!</div>" in content:
        # Geo-block / unavailable page: show a notification instead of playing.
        xbmc.executebuiltin('XBMC.Notification(Info:,'+str(translation(30022))+',10000)')
    else:
        # The player config endpoint is embedded as a "data:'...'" parameter.
        match = re.compile("data:'(.+?)'", re.DOTALL).findall(content)
        hosterURL = urlMain[urlMain.find("//")+2:]
        hosterURL = hosterURL[:hosterURL.find("/")]
        url = "http://"+hosterURL+urllib.unquote(match[0])
        content = opener.open(url).read()
        # The XML playlist carries the stream location in a CDATA <filename>.
        match = re.compile('<filename.+?><(.+?)>', re.DOTALL).findall(content)
        url = match[0].replace("![CDATA[", "")
        matchRTMPE = re.compile('rtmpe://(.+?)/(.+?)/(.+?)]', re.DOTALL).findall(url)
        matchHDS = re.compile('http://(.+?)/(.+?)/(.+?)/(.+?)/(.+?)\\?', re.DOTALL).findall(url)
        finalUrl = ""
        if matchRTMPE:
            playpath = matchRTMPE[0][2]
            # flv paths drop the extension; everything else is mp4-prefixed.
            if ".flv" in playpath:
                playpath = playpath[:playpath.rfind('.')]
            else:
                playpath = "mp4:"+playpath
            finalUrl = "rtmpe://"+matchRTMPE[0][0]+"/"+matchRTMPE[0][1]+"/ playpath="+playpath+" swfVfy=1 swfUrl=http://"+hosterURL+"/includes/vodplayer.swf app="+matchRTMPE[0][1]+"/_definst_ tcUrl=rtmpe://"+matchRTMPE[0][0]+"/"+matchRTMPE[0][1]+"/ pageUrl="+urlMain
        elif matchHDS:
            # HDS manifests are rewritten to an rtmpe URL on a random
            # fms-fraNN edge server (1..34).
            finalUrl = "rtmpe://fms-fra"+str(random.randint(1, 34))+".rtl.de/"+matchHDS[0][2]+"/ playpath=mp4:"+matchHDS[0][4].replace(".f4m", "")+" swfVfy=1 swfUrl=http://"+hosterURL+"/includes/vodplayer.swf app="+matchHDS[0][2]+"/_definst_ tcUrl=rtmpe://fms-fra"+str(random.randint(1, 34))+".rtl.de/"+matchHDS[0][2]+"/ pageUrl="+urlMain
        if finalUrl:
            listitem = xbmcgui.ListItem(path=finalUrl)
            xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
def queueVideo(url, name):
    """Append the video at *url* to XBMC's video playlist, labelled *name*."""
    video_playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    video_playlist.add(url, xbmcgui.ListItem(name))
def translation(id):
    """Return the addon's localized string for *id*, encoded as UTF-8 bytes."""
    localized = addon.getLocalizedString(id)
    return localized.encode('utf-8')
def favs(param):
    """Add or remove a favourite entry in the channel favourites file.

    *param* is a '###KEY###=value' encoded string carrying ###MODE###
    (ADD or REMOVE) and the ###TITLE###=... payload, which is stored
    verbatim as one line per favourite in channelFavsFile.

    Fixes: file handles are now closed via context managers (the original
    leaked them on exceptions), the unused REMOVE-branch local was removed,
    and the duplicated ADD append logic was collapsed into one path.
    """
    mode = param[param.find("###MODE###=")+11:]
    mode = mode[:mode.find("###")]
    # The favourite entry is everything from ###TITLE###= to the end.
    channelEntry = param[param.find("###TITLE###="):]
    if mode == "ADD":
        content = ""
        if os.path.exists(channelFavsFile):
            with open(channelFavsFile, 'r') as fh:
                content = fh.read()
        # Only append when the entry is not already present.
        if content.find(channelEntry) == -1:
            with open(channelFavsFile, 'a') as fh:
                fh.write(channelEntry+"\n")
    elif mode == "REMOVE":
        refresh = param[param.find("###REFRESH###=")+14:]
        refresh = refresh[:refresh.find("#")]
        with open(channelFavsFile, 'r') as fh:
            content = fh.read()
        with open(channelFavsFile, 'w') as fh:
            fh.write(content.replace(channelEntry+"\n", ""))
        if refresh == "TRUE":
            xbmc.executebuiltin("Container.Refresh")
def cleanTitle(title):
    """Decode common HTML entities in a scraped title and strip whitespace.

    NOTE(review): the original chained no-op replaces (e.g. replace("<", "<"))
    that look like HTML entity names decoded by an extraction tool — one of
    them even produced invalid syntax. The intended entity decoding is
    restored here explicitly, in the original replacement order.
    """
    replacements = (
        ("u0026", "&"),
        ("&lt;", "<"), ("&gt;", ">"), ("&amp;", "&"),
        ("&#039;", "'"), ("&quot;", "\""),
        ("&szlig;", "ß"), ("&ndash;", "-"),
        ("&Auml;", "Ä"), ("&Uuml;", "Ü"), ("&Ouml;", "Ö"),
        ("&auml;", "ä"), ("&uuml;", "ü"), ("&ouml;", "ö"),
    )
    for entity, char in replacements:
        title = title.replace(entity, char)
    # Un-escape backslash-escaped apostrophes and trim surrounding blanks.
    return title.replace("\\'", "'").strip()
def getPluginUrl():
    """Return this addon's plugin:// base URL (legacy layout on Xbox)."""
    if xbox:
        return "plugin://video/" + addon.getAddonInfo('name')
    return "plugin://" + addonID
def parameters_string_to_dict(parameters):
    """Parse a '?a=1&b=2' query string into a dict, skipping malformed pairs.

    The leading character (the '?') is dropped; only chunks that split into
    exactly one key and one value are kept.
    """
    if not parameters:
        return {}
    result = {}
    for chunk in parameters[1:].split("&"):
        pieces = chunk.split('=')
        if len(pieces) == 2:
            result[pieces[0]] = pieces[1]
    return result
def addLink(name, url, mode, iconimage, desc="", duration="", date=""):
    # Add a playable (non-folder) list item that routes back into this
    # plugin with the given mode; returns the addDirectoryItem result.
    u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
    ok = True
    liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name, "Plot": desc, "Aired": date, "Duration": duration, "Episode": 1})
    liz.setProperty('IsPlayable', 'true')
    # Reuse the thumbnail as fanart unless it is just the generic icon.
    if useThumbAsFanart and not iconimage.split(os.sep)[-1].startswith("icon"):
        liz.setProperty("fanart_image", iconimage)
    # Context menu: allow queueing the item onto the video playlist.
    entries = []
    entries.append((translation(30021), 'RunPlugin('+getPluginUrl()+'/?mode=queueVideo&url='+urllib.quote_plus(u)+'&name='+urllib.quote_plus(name)+')',))
    liz.addContextMenuItems(entries)
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
    return ok
def addDir(name, url, mode, iconimage, args="", type=""):
    # Add a folder item that re-enters the plugin with mode/args/thumb/type
    # round-tripped through the query string; returns addDirectoryItem result.
    u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&args="+urllib.quote_plus(args)+"&type="+urllib.quote_plus(type)
    ok = True
    liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name})
    # Reuse the thumbnail as fanart unless it is just the generic icon.
    if useThumbAsFanart and not iconimage.split(os.sep)[-1].startswith("icon"):
        liz.setProperty("fanart_image", iconimage)
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
    return ok
def addShowDir(name, url, mode, iconimage, args="", type=""):
    # Like addDir, but for show folders: adds context-menu entries to jump
    # to the season listing and to ADD the show to the favourites file.
    u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&args="+urllib.quote_plus(args)+"&type="+urllib.quote_plus(type)
    ok = True
    liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name})
    if useThumbAsFanart and not iconimage.split(os.sep)[-1].startswith("icon"):
        liz.setProperty("fanart_image", iconimage)
    # Encoded payload consumed by favs() when the user picks "add favourite".
    playListInfos = "###MODE###=ADD###TITLE###="+name+"###URL###="+urllib.quote_plus(url)+"###THUMB###="+iconimage+"###END###"
    liz.addContextMenuItems([(translation(30026), 'Container.Update('+getPluginUrl()+'/?mode=listSeasons&url='+urllib.quote_plus(url)+"&thumb="+urllib.quote_plus(iconimage)+')',), (translation(30024), 'RunPlugin('+getPluginUrl()+'/?mode=favs&url='+urllib.quote_plus(playListInfos)+')',)])
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
    return ok
def addShowRDir(name, url, mode, iconimage, args="", type=""):
    # Variant of addShowDir used inside the favourites listing: the context
    # menu REMOVEs the show from favourites (with a container refresh).
    u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&args="+urllib.quote_plus(args)+"&type="+urllib.quote_plus(type)
    ok = True
    liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={"Title": name})
    if useThumbAsFanart and not iconimage.split(os.sep)[-1].startswith("icon"):
        liz.setProperty("fanart_image", iconimage)
    # Encoded payload consumed by favs() when the user picks "remove favourite".
    playListInfos = "###MODE###=REMOVE###REFRESH###=TRUE###TITLE###="+name+"###URL###="+urllib.quote_plus(url)+"###THUMB###="+iconimage+"###END###"
    liz.addContextMenuItems([(translation(30026), 'Container.Update('+getPluginUrl()+'/?mode=listSeasons&url='+urllib.quote_plus(url)+"&thumb="+urllib.quote_plus(iconimage)+')',), (translation(30025), 'RunPlugin('+getPluginUrl()+'/?mode=favs&url='+urllib.quote_plus(playListInfos)+')',)])
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
    return ok
# Plugin entry point: decode the query string XBMC passed in sys.argv[2]
# and dispatch to the handler selected by 'mode'.
params = parameters_string_to_dict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode', ''))
url = urllib.unquote_plus(params.get('url', ''))
name = urllib.unquote_plus(params.get('name', ''))
args = urllib.unquote_plus(params.get('args', ''))
thumb = urllib.unquote_plus(params.get('thumb', ''))
# NOTE: shadows the builtin 'type'; kept because handlers take it by name.
type = urllib.unquote_plus(params.get('type', ''))
if mode == 'listChannel':
    listChannel(url, thumb)
elif mode == 'listVideos':
    listVideos(url, thumb, args)
elif mode == 'listSeasons':
    listSeasons(url, thumb)
elif mode == 'listVideosNew':
    listVideosNew(url, type)
elif mode == 'listShowsThumb':
    listShowsThumb(url)
elif mode == 'listShowsNoThumb':
    listShowsNoThumb(url, thumb)
elif mode == 'playVideo':
    playVideo(url)
elif mode == "queueVideo":
    queueVideo(url, name)
elif mode == 'listShowsFavs':
    listShowsFavs()
elif mode == 'favs':
    favs(url)
else:
    # No (or unknown) mode: show the top-level index.
    index()
| 0.002712 |
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests to the conductor service."""
from oslo_log import log as logging
import oslo_messaging as messaging
from nova import baserpc
from nova.conductor import rpcapi
import nova.conf
from nova.i18n import _LI, _LW
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class API(object):
    """Conductor API that does updates via RPC to the ConductorManager."""
    def __init__(self):
        # RPC client for conductor object operations plus the base RPC API
        # (used here only for ping) on the configured conductor topic.
        self.conductor_rpcapi = rpcapi.ConductorAPI()
        self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic)
    def object_backport_versions(self, context, objinst, object_versions):
        # Ask conductor to backport objinst to one of object_versions.
        return self.conductor_rpcapi.object_backport_versions(context, objinst,
                                                              object_versions)
    def wait_until_ready(self, context, early_timeout=10, early_attempts=10):
        '''Wait until a conductor service is up and running.
        This method calls the remote ping() method on the conductor topic until
        it gets a response. It starts with a shorter timeout in the loop
        (early_timeout) up to early_attempts number of tries. It then drops
        back to the globally configured timeout for rpc calls for each retry.
        '''
        attempt = 0
        timeout = early_timeout
        # if we show the timeout message, make sure we show a similar
        # message saying that everything is now working to avoid
        # confusion
        has_timedout = False
        while True:
            # NOTE(danms): Try ten times with a short timeout, and then punt
            # to the configured RPC timeout after that
            if attempt == early_attempts:
                timeout = None
            attempt += 1
            # NOTE(russellb): This is running during service startup. If we
            # allow an exception to be raised, the service will shut down.
            # This may fail the first time around if nova-conductor wasn't
            # running when this service started.
            try:
                self.base_rpcapi.ping(context, '1.21 GigaWatts',
                                      timeout=timeout)
                if has_timedout:
                    LOG.info(_LI('nova-conductor connection '
                                 'established successfully'))
                break
            except messaging.MessagingTimeout:
                has_timedout = True
                LOG.warning(_LW('Timed out waiting for nova-conductor. '
                                'Is it running? Or did this service start '
                                'before nova-conductor? '
                                'Reattempting establishment of '
                                'nova-conductor connection...'))
class ComputeTaskAPI(object):
    """ComputeTask API that queues up compute tasks for nova-conductor."""
    def __init__(self):
        self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
    def resize_instance(self, context, instance, extra_instance_updates,
                        scheduler_hint, flavor, reservations,
                        clean_shutdown=True, request_spec=None):
        # Cold migration/resize: delegated to migrate_server with live=False.
        # NOTE(comstud): 'extra_instance_updates' is not used here but is
        # needed for compatibility with the cells_rpcapi version of this
        # method.
        self.conductor_compute_rpcapi.migrate_server(
            context, instance, scheduler_hint, live=False, rebuild=False,
            flavor=flavor, block_migration=None, disk_over_commit=None,
            reservations=reservations, clean_shutdown=clean_shutdown,
            request_spec=request_spec)
    def live_migrate_instance(self, context, instance, host_name,
                              block_migration, disk_over_commit,
                              request_spec=None, async=False):
        # NOTE(review): 'async' became a reserved word in Python 3.7; this
        # signature is Python 2 only and would need renaming to port.
        scheduler_hint = {'host': host_name}
        if async:
            # Fire-and-forget variant of the live migration request.
            self.conductor_compute_rpcapi.live_migrate_instance(
                context, instance, scheduler_hint, block_migration,
                disk_over_commit, request_spec)
        else:
            self.conductor_compute_rpcapi.migrate_server(
                context, instance, scheduler_hint, True, False, None,
                block_migration, disk_over_commit, None,
                request_spec=request_spec)
    def build_instances(self, context, instances, image, filter_properties,
                        admin_password, injected_files, requested_networks,
                        security_groups, block_device_mapping, legacy_bdm=True):
        # Queue scheduling + building of the given instances on conductor.
        self.conductor_compute_rpcapi.build_instances(context,
            instances=instances, image=image,
            filter_properties=filter_properties,
            admin_password=admin_password, injected_files=injected_files,
            requested_networks=requested_networks,
            security_groups=security_groups,
            block_device_mapping=block_device_mapping,
            legacy_bdm=legacy_bdm)
    def schedule_and_build_instances(self, context, build_requests,
                                     request_spec, image,
                                     admin_password, injected_files,
                                     requested_networks, block_device_mapping):
        self.conductor_compute_rpcapi.schedule_and_build_instances(
            context, build_requests, request_spec, image,
            admin_password, injected_files, requested_networks,
            block_device_mapping)
    def unshelve_instance(self, context, instance, request_spec=None):
        self.conductor_compute_rpcapi.unshelve_instance(context,
                instance=instance, request_spec=request_spec)
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate=False, on_shared_storage=False,
                         preserve_ephemeral=False, host=None,
                         request_spec=None, kwargs=None):
        # kwargs unused but required for cell compatibility
        self.conductor_compute_rpcapi.rebuild_instance(context,
                instance=instance,
                new_pass=new_pass,
                injected_files=injected_files,
                image_ref=image_ref,
                orig_image_ref=orig_image_ref,
                orig_sys_metadata=orig_sys_metadata,
                bdms=bdms,
                recreate=recreate,
                on_shared_storage=on_shared_storage,
                preserve_ephemeral=preserve_ephemeral,
                host=host,
                request_spec=request_spec)
| 0.003331 |
import os
# NOTE(review): example-project settings; DEBUG must be False in any real
# deployment (leaks stack traces and settings to visitors).
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Sample database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # SQLite: NAME is the database file; the other keys are unused.
        'NAME': 'micropages_example_db',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Location of example files.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this is an example-project key; a real deployment must
# generate its own key and keep it out of version control.
SECRET_KEY = 'ee0fqx8i$6g0rk3igym7mr^t4pw33-jrcnxd4*sv9yriv&-*fo'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'micropages.templates.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
# Django Micropages.
'micropages'
) | 0.000891 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
# Shared module-level test configuration and WebDriver state used by all
# helpers below.
build = "XXX项目 xxx 版本"
url = ""
# Desired capabilities sent to a remote grid session (see remoteDriver).
desired_cap = {}
# Explicit-wait budget in seconds; 0 makes helpers use direct lookups.
wait_seconds = 0
driver = webdriver.Chrome();
wait = WebDriverWait(driver, wait_seconds)
def remoteDriver(url):
    """Replace the module-level driver/wait with a Remote WebDriver session.

    Fix: the original assigned *driver* and *wait* to function locals, so
    every helper in this module kept using the locally started Chrome
    driver instead of the remote session.
    """
    global driver, wait
    # NOTE(review): 'project' is not defined anywhere at module level —
    # confirm where this value is meant to come from (raises NameError now).
    desired_cap["project"] = project
    desired_cap["build"] = build
    driver = webdriver.Remote(url, desired_capabilities=desired_cap)
    wait = WebDriverWait(driver, wait_seconds)
def goto(url):
    # Navigate the shared driver to *url*.
    driver.get(url)
def waitPage(title):
    # Block until the page title equals *title* (raises TimeoutException).
    wait.until(EC.title_is(title))
def switchToWindow(title):
    # Switch to the most recently opened window, falling back to the
    # previous one if the expected title never appears.
    handles = driver.window_handles
    # NOTE(review): busy-wait until a second window exists; spins the CPU
    # and never times out — consider WebDriverWait(number_of_windows_to_be).
    while len(handles) == 1:
        handles = driver.window_handles
    driver.switch_to_window(handles[len(handles) - 1])
    try:
        waitPage(title)
    except TimeoutException:
        driver.switch_to_window(handles[len(handles) - 2])
def switchToLightbox(path):
    # Wait for the lightbox located by XPath *path* to become visible.
    switchToLightbox.__doc__
    wait.until(EC.visibility_of_element_located((By.XPATH, path)))
def switchToFrame(name):
    # Wait for the frame with id *name* to be visible, then enter it.
    wait.until(EC.visibility_of_element_located((By.ID, name)))
    driver.switch_to_frame(name)
def getXPathByDynamicId(tag, value):
    """Build an XPath matching *tag* elements whose id contains *value*."""
    return '//{0}[contains(@id, "{1}")]'.format(tag, value)
def getXPathByText(tag, value):
    """Build an XPath matching *tag* elements whose text contains *value*."""
    return '//{0}[contains(., "{1}")]'.format(tag, value)
def click(path):
    # Click the element at XPath *path*; with a wait budget, first wait
    # until it is clickable.
    if wait_seconds > 0:
        wait.until(EC.element_to_be_clickable((By.XPATH, path))).click()
    else:
        driver.find_element_by_xpath(path).click()
def rightClick(path):
    # Context-click (right click) the element at XPath *path*.
    ActionChains(driver).context_click(driver.find_element_by_xpath(path)).perform()
def doubleClick(path):
    # Double-click the element at XPath *path*.
    ActionChains(driver).double_click(driver.find_element_by_xpath(path)).perform()
def drag(sourcePath, targetPath):
    # Drag the element at *sourcePath* onto the element at *targetPath*.
    source = driver.find_element_by_xpath(sourcePath)
    target = driver.find_element_by_xpath(targetPath)
    ActionChains(driver).drag_and_drop(source, target).perform()
def select(path, index):
    """Choose option *index* in the <select> element located by XPath *path*.

    Fix: the first branch called an undefined clickElement(); the intended
    helper is this module's click().

    NOTE(review): the 30/20-second thresholds choosing between click-first,
    explicit-wait and direct lookup look arbitrary — confirm intent.
    """
    if wait_seconds > 30:
        click(path)
        Select(driver.find_element_by_xpath(path)).select_by_index(index)
    elif wait_seconds > 20:
        Select(wait.until(EC.visibility_of_element_located((By.XPATH, path)))).select_by_index(index)
    else:
        Select(driver.find_element_by_xpath(path)).select_by_index(index)
def inputText(path, text):
    # Type *text* into the element at XPath *path*, waiting for visibility
    # when a wait budget is configured.
    if wait_seconds > 0:
        wait.until(EC.visibility_of_element_located((By.XPATH, path))).send_keys(text)
    else:
        driver.find_element_by_xpath(path).send_keys(text)
def getText(path):
    # Return the visible text of the element at XPath *path*.
    if wait_seconds > 0:
        return wait.until(EC.presence_of_element_located((By.XPATH, path))).text
    else:
        return driver.find_element_by_xpath(path).text
def pressCtrlX(key):
    # Send Ctrl+<key> as a single chord to the focused element.
    ActionChains(driver).key_down(Keys.CONTROL).send_keys(key).key_up(Keys.CONTROL).perform()
def selectAll():
    # Keyboard shortcut Ctrl+A.
    pressCtrlX('a')
def copy():
    # Keyboard shortcut Ctrl+C.
    pressCtrlX('c')
def paste():
    # Keyboard shortcut Ctrl+V.
    pressCtrlX('v')
| 0.009066 |
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print system memory information.
$ python examples/meminfo.py
MEMORY
------
Total : 9.7G
Available : 4.9G
Percent : 49.0
Used : 8.2G
Free : 1.4G
Active : 5.6G
Inactive : 2.1G
Buffers : 341.2M
Cached : 3.2G
SWAP
----
Total : 0B
Used : 0B
Free : 0B
Percent : 0.0
Sin : 0B
Sout : 0B
"""
import psutil
def bytes2human(n):
    """Convert a byte count into a short human-readable string.

    Values below 1 KiB are returned as plain bytes ('123B'); larger values
    use the biggest binary unit that fits, with one decimal place.

    >>> bytes2human(10000)
    '9.8K'
    >>> bytes2human(100001221)
    '95.4M'
    """
    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    for exponent in range(len(units) - 1, -1, -1):
        threshold = 1 << ((exponent + 1) * 10)
        if n >= threshold:
            return '%.1f%s' % (float(n) / threshold, units[exponent])
    return "%sB" % n
def pprint_ntuple(nt):
    """Print every field of namedtuple *nt*, humanizing byte counts.

    The 'percent' field is printed as-is; all other fields are assumed to
    be byte counts and formatted via bytes2human.
    """
    for field in nt._fields:
        raw = getattr(nt, field)
        shown = raw if field == 'percent' else bytes2human(raw)
        print('%-10s : %7s' % (field.capitalize(), shown))
def main():
    # Print virtual-memory and swap statistics in the format shown in the
    # module docstring.
    print('MEMORY\n------')
    pprint_ntuple(psutil.virtual_memory())
    print('\nSWAP\n----')
    pprint_ntuple(psutil.swap_memory())
if __name__ == '__main__':
main()
| 0.000673 |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import six
from yaql.language import specs
from yaql.language import utils
from yaql.language import yaqltypes
from murano.dsl import constants
from murano.dsl import dsl
from murano.dsl import dsl_types
from murano.dsl import exceptions
from murano.dsl import helpers
from murano.dsl import yaql_functions
from murano.dsl import yaql_integration
def _prepare_context():
    """Build the yaql context used to evaluate left-hand-side expressions.

    Registers getters/setters so that '$var', 'obj.prop' and 'list[i]'
    expressions evaluate to _Property objects, which LhsExpression then
    writes through.
    """
    @specs.parameter('name', yaqltypes.StringConstant())
    def get_context_data(context, name):
        # Resolve a bare variable name against the caller's root context.
        root_context = context['#root_context']
        def set_data(value):
            if not name or name == '$' or name == '$this':
                raise ValueError('Cannot assign to {0}'.format(name))
            # Walk up to the nearest context that owns variable storage.
            ctx = root_context
            while constants.CTX_VARIABLE_SCOPE not in ctx:
                ctx = ctx.parent
            ctx[name] = value
        return _Property(lambda: root_context[name], set_data)
    @specs.parameter('this', _Property)
    @specs.parameter('key', yaqltypes.Keyword())
    def attribution(context, this, key):
        # 'x.key' access: works on mappings, murano objects, and (static
        # properties of) murano types / type references.
        def setter(src_property, value):
            src = src_property.get()
            if isinstance(src, utils.MappingType):
                # Mappings are immutable here: write back a new FrozenDict.
                src_property.set(
                    utils.FrozenDict(
                        itertools.chain(
                            six.iteritems(src),
                            ((key, value),))))
            elif isinstance(src, dsl_types.MuranoObject):
                src.set_property(key, value, context['#root_context'])
            elif isinstance(src, (
                    dsl_types.MuranoTypeReference,
                    dsl_types.MuranoType)):
                if isinstance(src, dsl_types.MuranoTypeReference):
                    mc = src.type
                else:
                    mc = src
                helpers.get_executor().set_static_property(
                    mc, key, value, context['#root_context'])
            else:
                raise ValueError(
                    'attribution may only be applied to '
                    'objects and dictionaries')
        def getter(src):
            if isinstance(src, utils.MappingType):
                # Missing keys read as an empty dict to allow chained writes.
                return src.get(key, {})
            elif isinstance(src, dsl_types.MuranoObject):
                try:
                    return src.get_property(key, context['#root_context'])
                except exceptions.UninitializedPropertyAccessError:
                    return {}
            elif isinstance(src, (
                    dsl_types.MuranoTypeReference,
                    dsl_types.MuranoType)):
                if isinstance(src, dsl_types.MuranoTypeReference):
                    mc = src.type
                else:
                    mc = src
                return helpers.get_executor().get_static_property(
                    mc, key, context['#root_context'])
            else:
                raise ValueError(
                    'attribution may only be applied to '
                    'objects and dictionaries')
        return _Property(
            lambda: getter(this.get()),
            lambda value: setter(this, value))
    @specs.parameter('this', _Property)
    @specs.parameter('index', yaqltypes.Lambda(with_context=True))
    def indexation(context, this, index):
        # 'x[index]': integer indexes address sequences; any other index
        # value is treated as a mapping key via attribution().
        index = index(context['#root_context'])
        def getter(src):
            if utils.is_sequence(src):
                return src[index]
            else:
                raise ValueError('indexation may only be applied to lists')
        def setter(src_property, value):
            src = src_property.get()
            if utils.is_sequence(src):
                # Sequences are immutable tuples: rebuild with the new item.
                src_property.set(src[:index] + (value,) + src[index + 1:])
            elif isinstance(src, utils.MappingType):
                attribution(src_property, index).set(value)
        if isinstance(index, int):
            return _Property(
                lambda: getter(this.get()),
                lambda value: setter(this, value))
        else:
            return attribution(context, this, index)
    def _wrap_type_reference(tr, context):
        # Type references are readable but never valid assignment targets.
        return _Property(
            lambda: tr, context['#self']._invalid_target)
    @specs.parameter('prefix', yaqltypes.Keyword())
    @specs.parameter('name', yaqltypes.Keyword())
    @specs.name('#operator_:')
    def ns_resolve(context, prefix, name):
        return _wrap_type_reference(
            yaql_functions.ns_resolve(context, prefix, name), context)
    @specs.parameter('name', yaqltypes.Keyword())
    @specs.name('#unary_operator_:')
    def ns_resolve_unary(context, name):
        return _wrap_type_reference(
            yaql_functions.ns_resolve_unary(context, name), context)
    @specs.parameter('object_', dsl_types.MuranoObject)
    def type_(context, object_):
        return _wrap_type_reference(yaql_functions.type_(object_), context)
    @specs.name('type')
    @specs.parameter('cls', dsl.MuranoTypeParameter())
    def type_from_name(context, cls):
        return _wrap_type_reference(cls, context)
    # Register everything under the yaql operator hook names.
    res_context = yaql_integration.create_empty_context()
    res_context.register_function(get_context_data, '#get_context_data')
    res_context.register_function(attribution, '#operator_.')
    res_context.register_function(indexation, '#indexer')
    res_context.register_function(ns_resolve)
    res_context.register_function(ns_resolve_unary)
    res_context.register_function(type_)
    res_context.register_function(type_from_name)
    return res_context
class _Property(object):
def __init__(self, getter, setter):
self._getter = getter
self._setter = setter
def get(self):
return self._getter()
def set(self, value):
self._setter(value)
class LhsExpression(object):
    """Assignable expression: evaluates to a _Property and writes through it."""
    # Shared evaluation context with the LHS operator overrides installed.
    lhs_context = _prepare_context()
    def __init__(self, expression):
        # expression: a compiled yaql expression expected to yield _Property.
        self._expression = expression
    def _invalid_target(self, *args, **kwargs):
        # Used as the setter for read-only results (e.g. type references).
        raise exceptions.InvalidLhsTargetError(self._expression)
    def __call__(self, value, context):
        # Evaluate the expression in a child of lhs_context and assign value.
        new_context = LhsExpression.lhs_context.create_child_context()
        # NOTE(review): the empty-string key looks like it should be '$'
        # (the current-value binding) — confirm against upstream murano.
        new_context[''] = context['$']
        new_context['#root_context'] = context
        new_context['#self'] = self
        for name in (constants.CTX_NAMES_SCOPE,):
            new_context[name] = context[name]
        prop = self._expression(context=new_context)
        if not isinstance(prop, _Property):
            self._invalid_target()
        prop.set(value)
| 0 |
'''
Created on Jan 2, 2015
@author: Milos
'''
from django import template
from django.core.urlresolvers import reverse
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def commnets(milestone):
    """Return the number of comment events (event_kind 'K') on *milestone*.

    NOTE(review): the name is a typo for 'comments', but templates look
    filters up by the registered name, so it must stay as-is.
    """
    return milestone.event_set.filter(event_kind="K").count()
@register.filter
def closedtasks(milestone):
    """Return how many of the milestone's tasks are closed (state_kind 'Z')."""
    return milestone.task_set.filter(state_kind="Z").count()
@register.filter
def percentage(milestone):
    """Percentage of the milestone's tasks that are closed, to 2 decimals.

    Returns the integer 0 when there are no tasks or none are closed.
    """
    done = milestone.task_set.filter(state_kind="Z").count()
    total = milestone.task_set.all().count()
    if done != 0 and total != 0:
        return round(100 * float(done) / float(total), 2)
    return 0
@register.filter
def showname(keyvalue):
    """Map a one-letter state code to its human-readable display name."""
    display_names = {
        'P': 'Accepted',
        'C': 'Created',
        'Z': 'Closed',
        'O': 'On Wait',
    }
    return display_names[keyvalue]
@register.filter
def paintborder(priority):
    """Return the border colour hex code for a priority level code."""
    colours = {
        'C': '#ce2b37',
        'H': '#ee6c3a',
        'M': '#41783f',
        'L': '#3d70b6',
    }
    return colours[priority]
@register.filter
def event_glyphicon_style(event):
    """Pick the glyphicon CSS class matching the event's kind code."""
    icons = {
        'K': "glyphicon-comment",
        'C': "glyphicon-record",
        'S': "glyphicon-cog",
        'A': "glyphicon-plus-sign",
        'P': "glyphicon-exclamation-sign",
        'R': "glyphicon-ok-sign",
    }
    return icons[event.event_kind]
@register.filter
def task_priority_style(task):
    """Bootstrap callout class for the task's priority; closed tasks get none."""
    if task.state_kind == 'Z':
        return ""
    levels = {'L': "success", 'M': "info", 'H': "warning", 'C': "danger"}
    return "bs-callout-" + levels[task.priority_lvl]
@register.filter
def event_summary(event):
    """Build a short (max 100 chars) human-readable summary for an event.

    Prefix: 'Task "name": ' / 'Requirement "name": ' for task events,
    'Milestone "name": ' for milestone events; followed by the comment
    text, commit message or state change, truncated with an ellipsis.

    Fix: the milestone branch had a misplaced quote ('"Milestone "...')
    that rendered a stray leading double quote; it now matches the
    formatting of the task/requirement branch.
    """
    if event.requirement_task:
        if hasattr(event.requirement_task, 'task'):
            summary = "Task "
        elif hasattr(event.requirement_task, 'requirement'):
            summary = "Requirement "
        else:
            summary = ''
        summary += '"'+event.requirement_task.name+'": '
    elif event.milestone:
        summary = 'Milestone "'+event.milestone.name+'": '
    else:
        summary = ''
    if event.event_kind == 'K':
        summary += event.comment.content
    elif event.event_kind == 'C':
        summary += event.commit.message
    elif event.event_kind == 'S':
        summary += event.statechange.getstate()
    # Clamp to 100 characters, replacing the tail with an ellipsis.
    max_length = 100
    if len(summary) > max_length:
        summary = summary[:max_length-3]+"..."
    else:
        summary = summary[:max_length]
    return summary
def do_escape(to_escape, autoescape):
    """Escape *to_escape* for HTML only when autoescaping is active."""
    if autoescape:
        return conditional_escape(to_escape)
    return to_escape
@register.filter
def event_user(event, autoescape=None):
    # Render the event's author as a user-icon label; registered users link
    # to their author page, anonymous commit authors render as plain text.
    if event.event_kind == 'C':
        if event.commit.committer_user:
            user = event.commit.committer_user
            ret = """<a href="{author_url}"><span class="glyphicon glyphicon-user"></span> {user_name}</a>""".format(author_url=reverse('author', kwargs={'pk':user.pk}), user_name=do_escape(user.username, autoescape))
        else:
            # Commit author has no site account: no profile link available.
            ret = """<span class="glyphicon glyphicon-user"></span> {user_name}""".format(user_name=do_escape(event.commit.committer_name, autoescape))
    else:
        ret = """<a href="{author_url}"><span class="glyphicon glyphicon-user"></span> {user_name}</a>""".format(author_url=reverse('author', kwargs={'pk':event.event_user.pk}), user_name=do_escape(event.event_user.username, autoescape))
    # Safe to mark: user-supplied names were escaped via do_escape above.
    return mark_safe(ret)
| 0.014293 |
class Solution(object):
    def nextPermutation(self, nums):
        """
        Rearrange ``nums`` in place into the lexicographically next greater
        permutation.  If no greater permutation exists (the sequence is
        non-increasing), rearrange into the lowest order, i.e. sorted
        ascending.

        O(n) time, O(1) extra space.

        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Fixes over the original: removed 13 lines of duplicated dead code
        # after the first return; the pivot search no longer relies on
        # Python's negative-index wrap-around (nums[i-1] with i == 0); the
        # suffix is reversed instead of sorted (it is already descending).
        if not nums:
            return nums
        # 1. Scan right-to-left for the first index whose value is smaller
        #    than its successor ("pivot").  Right of it lies a
        #    non-increasing suffix.
        pivot = len(nums) - 2
        while pivot >= 0 and nums[pivot] >= nums[pivot + 1]:
            pivot -= 1
        if pivot >= 0:
            # 2. Swap the pivot with the rightmost suffix value greater
            #    than it (the smallest such value, as the suffix descends).
            successor = len(nums) - 1
            while nums[successor] <= nums[pivot]:
                successor -= 1
            nums[pivot], nums[successor] = nums[successor], nums[pivot]
        # 3. The suffix is still non-increasing; reversing it produces the
        #    smallest arrangement.  When pivot == -1 this reverses the
        #    whole (descending) list into ascending order.
        nums[pivot + 1:] = nums[pivot + 1:][::-1]
        return
"""
This is just code for the introduction to Python.
It also won't be used anywhere else in the book.
"""
# type: ignore
# The pound sign marks the start of a comment. Python itself
# ignores the comments, but they're helpful for anyone reading the code.
for i in [1, 2, 3, 4, 5]:
print(i) # first line in "for i" block
for j in [1, 2, 3, 4, 5]:
print(j) # first line in "for j" block
print(i + j) # last line in "for j" block
print(i) # last line in "for i" block
print("done looping")
long_winded_computation = (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 +
13 + 14 + 15 + 16 + 17 + 18 + 19 + 20)
list_of_lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
easier_to_read_list_of_lists = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
two_plus_three = 2 + \
3
for i in [1, 2, 3, 4, 5]:
# notice the blank line
print(i)
import re
my_regex = re.compile("[0-9]+", re.I)
import re as regex
my_regex = regex.compile("[0-9]+", regex.I)
from collections import defaultdict, Counter
lookup = defaultdict(int)
my_counter = Counter()
match = 10
from re import * # uh oh, re has a match function
print(match) # "<function match at 0x10281e6a8>"
def double(x):
"""
This is where you put an optional docstring that explains what the
function does. For example, this function multiplies its input by 2.
"""
return x * 2
def apply_to_one(f):
"""Calls the function f with 1 as its argument"""
return f(1)
my_double = double # refers to the previously defined function
x = apply_to_one(my_double) # equals 2
assert x == 2
y = apply_to_one(lambda x: x + 4) # equals 5
assert y == 5
another_double = lambda x: 2 * x # Don't do this
def another_double(x):
"""Do this instead"""
return 2 * x
def my_print(message = "my default message"):
print(message)
my_print("hello") # prints 'hello'
my_print() # prints 'my default message'
def full_name(first = "What's-his-name", last = "Something"):
return first + " " + last
full_name("Joel", "Grus") # "Joel Grus"
full_name("Joel") # "Joel Something"
full_name(last="Grus") # "What's-his-name Grus"
assert full_name("Joel", "Grus") == "Joel Grus"
assert full_name("Joel") == "Joel Something"
assert full_name(last="Grus") == "What's-his-name Grus"
single_quoted_string = 'data science'
double_quoted_string = "data science"
tab_string = "\t" # represents the tab character
len(tab_string) # is 1
assert len(tab_string) == 1
not_tab_string = r"\t" # represents the characters '\' and 't'
len(not_tab_string) # is 2
assert len(not_tab_string) == 2
multi_line_string = """This is the first line.
and this is the second line
and this is the third line"""
first_name = "Joel"
last_name = "Grus"
full_name1 = first_name + " " + last_name # string addition
full_name2 = "{0} {1}".format(first_name, last_name) # string.format
full_name3 = f"{first_name} {last_name}"
try:
print(0 / 0)
except ZeroDivisionError:
print("cannot divide by zero")
integer_list = [1, 2, 3]
heterogeneous_list = ["string", 0.1, True]
list_of_lists = [integer_list, heterogeneous_list, []]
list_length = len(integer_list) # equals 3
list_sum = sum(integer_list) # equals 6
assert list_length == 3
assert list_sum == 6
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
zero = x[0] # equals 0, lists are 0-indexed
one = x[1] # equals 1
nine = x[-1] # equals 9, 'Pythonic' for last element
eight = x[-2] # equals 8, 'Pythonic' for next-to-last element
x[0] = -1 # now x is [-1, 1, 2, 3, ..., 9]
assert x == [-1, 1, 2, 3, 4, 5, 6, 7, 8, 9]
first_three = x[:3] # [-1, 1, 2]
three_to_end = x[3:] # [3, 4, ..., 9]
one_to_four = x[1:5] # [1, 2, 3, 4]
last_three = x[-3:] # [7, 8, 9]
without_first_and_last = x[1:-1] # [1, 2, ..., 8]
copy_of_x = x[:] # [-1, 1, 2, ..., 9]
every_third = x[::3] # [-1, 3, 6, 9]
five_to_three = x[5:2:-1] # [5, 4, 3]
assert every_third == [-1, 3, 6, 9]
assert five_to_three == [5, 4, 3]
1 in [1, 2, 3] # True
0 in [1, 2, 3] # False
x = [1, 2, 3]
x.extend([4, 5, 6]) # x is now [1, 2, 3, 4, 5, 6]
assert x == [1, 2, 3, 4, 5, 6]
x = [1, 2, 3]
y = x + [4, 5, 6] # y is [1, 2, 3, 4, 5, 6]; x is unchanged
assert x == [1, 2, 3]
assert y == [1, 2, 3, 4, 5, 6]
x = [1, 2, 3]
x.append(0) # x is now [1, 2, 3, 0]
y = x[-1] # equals 0
z = len(x) # equals 4
assert x == [1, 2, 3, 0]
assert y == 0
assert z == 4
x, y = [1, 2] # now x is 1, y is 2
assert x == 1
assert y == 2
_, y = [1, 2] # now y == 2, didn't care about the first element
my_list = [1, 2]
my_tuple = (1, 2)
other_tuple = 3, 4
my_list[1] = 3 # my_list is now [1, 3]
try:
my_tuple[1] = 3
except TypeError:
print("cannot modify a tuple")
def sum_and_product(x, y):
return (x + y), (x * y)
sp = sum_and_product(2, 3) # sp is (5, 6)
s, p = sum_and_product(5, 10) # s is 15, p is 50
x, y = 1, 2 # now x is 1, y is 2
x, y = y, x # Pythonic way to swap variables; now x is 2, y is 1
assert x == 2
assert y == 1
empty_dict = {} # Pythonic
empty_dict2 = dict() # less Pythonic
grades = {"Joel": 80, "Tim": 95} # dictionary literal
joels_grade = grades["Joel"] # equals 80
assert joels_grade == 80
try:
kates_grade = grades["Kate"]
except KeyError:
print("no grade for Kate!")
joel_has_grade = "Joel" in grades # True
kate_has_grade = "Kate" in grades # False
assert joel_has_grade
assert not kate_has_grade
joels_grade = grades.get("Joel", 0) # equals 80
kates_grade = grades.get("Kate", 0) # equals 0
no_ones_grade = grades.get("No One") # default default is None
assert joels_grade == 80
assert kates_grade == 0
assert no_ones_grade is None
grades["Tim"] = 99 # replaces the old value
grades["Kate"] = 100 # adds a third entry
num_students = len(grades) # equals 3
assert num_students == 3
tweet = {
"user" : "joelgrus",
"text" : "Data Science is Awesome",
"retweet_count" : 100,
"hashtags" : ["#data", "#science", "#datascience", "#awesome", "#yolo"]
}
tweet_keys = tweet.keys() # iterable for the keys
tweet_values = tweet.values() # iterable for the values
tweet_items = tweet.items() # iterable for the (key, value) tuples
"user" in tweet_keys # True, but not Pythonic
"user" in tweet # Pythonic way of checking for keys
"joelgrus" in tweet_values # True (slow but the only way to check)
assert "user" in tweet_keys
assert "user" in tweet
assert "joelgrus" in tweet_values
document = ["data", "science", "from", "scratch"]
word_counts = {}
for word in document:
if word in word_counts:
word_counts[word] += 1
else:
word_counts[word] = 1
word_counts = {}
for word in document:
try:
word_counts[word] += 1
except KeyError:
word_counts[word] = 1
word_counts = {}
for word in document:
previous_count = word_counts.get(word, 0)
word_counts[word] = previous_count + 1
from collections import defaultdict
word_counts = defaultdict(int) # int() produces 0
for word in document:
word_counts[word] += 1
dd_list = defaultdict(list) # list() produces an empty list
dd_list[2].append(1) # now dd_list contains {2: [1]}
dd_dict = defaultdict(dict) # dict() produces an empty dict
dd_dict["Joel"]["City"] = "Seattle" # {"Joel" : {"City": Seattle"}}
dd_pair = defaultdict(lambda: [0, 0])
dd_pair[2][1] = 1 # now dd_pair contains {2: [0, 1]}
from collections import Counter
c = Counter([0, 1, 2, 0]) # c is (basically) {0: 2, 1: 1, 2: 1}
# recall, document is a list of words
word_counts = Counter(document)
# print the 10 most common words and their counts
for word, count in word_counts.most_common(10):
print(word, count)
primes_below_10 = {2, 3, 5, 7}
s = set()
s.add(1) # s is now {1}
s.add(2) # s is now {1, 2}
s.add(2) # s is still {1, 2}
x = len(s) # equals 2
y = 2 in s # equals True
z = 3 in s # equals False
hundreds_of_other_words = [] # required for the below code to run
stopwords_list = ["a", "an", "at"] + hundreds_of_other_words + ["yet", "you"]
"zip" in stopwords_list # False, but have to check every element
stopwords_set = set(stopwords_list)
"zip" in stopwords_set # very fast to check
item_list = [1, 2, 3, 1, 2, 3]
num_items = len(item_list) # 6
item_set = set(item_list) # {1, 2, 3}
num_distinct_items = len(item_set) # 3
distinct_item_list = list(item_set) # [1, 2, 3]
assert num_items == 6
assert item_set == {1, 2, 3}
assert num_distinct_items == 3
assert distinct_item_list == [1, 2, 3]
if 1 > 2:
message = "if only 1 were greater than two..."
elif 1 > 3:
message = "elif stands for 'else if'"
else:
message = "when all else fails use else (if you want to)"
parity = "even" if x % 2 == 0 else "odd"
x = 0
while x < 10:
print(f"{x} is less than 10")
x += 1
# range(10) is the numbers 0, 1, ..., 9
for x in range(10):
print(f"{x} is less than 10")
for x in range(10):
if x == 3:
continue # go immediately to the next iteration
if x == 5:
break # quit the loop entirely
print(x)
one_is_less_than_two = 1 < 2 # equals True
true_equals_false = True == False # equals False
assert one_is_less_than_two
assert not true_equals_false
x = None
assert x == None, "this is the not the Pythonic way to check for None"
assert x is None, "this is the Pythonic way to check for None"
def some_function_that_returns_a_string():
return ""
s = some_function_that_returns_a_string()
if s:
first_char = s[0]
else:
first_char = ""
first_char = s and s[0]
safe_x = x or 0
safe_x = x if x is not None else 0
all([True, 1, {3}]) # True, all are truthy
all([True, 1, {}]) # False, {} is falsy
any([True, 1, {}]) # True, True is truthy
all([]) # True, no falsy elements in the list
any([]) # False, no truthy elements in the list
x = [4, 1, 2, 3]
y = sorted(x) # y is [1, 2, 3, 4], x is unchanged
x.sort() # now x is [1, 2, 3, 4]
# sort the list by absolute value from largest to smallest
x = sorted([-4, 1, -2, 3], key=abs, reverse=True) # is [-4, 3, -2, 1]
# sort the words and counts from highest count to lowest
wc = sorted(word_counts.items(),
key=lambda word_and_count: word_and_count[1],
reverse=True)
even_numbers = [x for x in range(5) if x % 2 == 0] # [0, 2, 4]
squares = [x * x for x in range(5)] # [0, 1, 4, 9, 16]
even_squares = [x * x for x in even_numbers] # [0, 4, 16]
assert even_numbers == [0, 2, 4]
assert squares == [0, 1, 4, 9, 16]
assert even_squares == [0, 4, 16]
square_dict = {x: x * x for x in range(5)} # {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}
square_set = {x * x for x in [1, -1]} # {1}
assert square_dict == {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}
assert square_set == {1}
zeros = [0 for _ in even_numbers] # has the same length as even_numbers
assert zeros == [0, 0, 0]
pairs = [(x, y)
for x in range(10)
for y in range(10)] # 100 pairs (0,0) (0,1) ... (9,8), (9,9)
assert len(pairs) == 100
increasing_pairs = [(x, y) # only pairs with x < y,
for x in range(10) # range(lo, hi) equals
for y in range(x + 1, 10)] # [lo, lo + 1, ..., hi - 1]
assert len(increasing_pairs) == 9 + 8 + 7 + 6 + 5 + 4 + 3 + 2 + 1
assert all(x < y for x, y in increasing_pairs)
assert 1 + 1 == 2
assert 1 + 1 == 2, "1 + 1 should equal 2 but didn't"
def smallest_item(xs):
return min(xs)
assert smallest_item([10, 20, 5, 40]) == 5
assert smallest_item([1, 0, -1, 2]) == -1
def smallest_item(xs):
assert xs, "empty list has no smallest item"
return min(xs)
class CountingClicker:
    """A tally counter: click() to count, read() to inspect, reset() to zero."""
    def __init__(self, count=0):
        """Start the clicker at *count* (default 0)."""
        self.count = count
    def __repr__(self):
        """Debug representation showing the current tally."""
        return "CountingClicker(count={})".format(self.count)
    def click(self, num_times=1):
        """Advance the tally by *num_times* clicks."""
        self.count = self.count + num_times
    def read(self):
        """Return the current tally."""
        return self.count
    def reset(self):
        """Zero the tally."""
        self.count = 0
clicker = CountingClicker()
assert clicker.read() == 0, "clicker should start with count 0"
clicker.click()
clicker.click()
assert clicker.read() == 2, "after two clicks, clicker should have count 2"
clicker.reset()
assert clicker.read() == 0, "after reset, clicker should be back to 0"
# A subclass inherits all the behavior of its parent class.
class NoResetClicker(CountingClicker):
    # This class has all the same methods as CountingClicker
    # Except that it has a reset method that does nothing.
    def reset(self):
        """Intentionally a no-op: this clicker's count can never be reset."""
        pass
clicker2 = NoResetClicker()
assert clicker2.read() == 0
clicker2.click()
assert clicker2.read() == 1
clicker2.reset()
assert clicker2.read() == 1, "reset shouldn't do anything"
def generate_range(n):
    """Lazily yield the integers 0 through n-1, one per iteration (like range)."""
    current = 0
    while current < n:
        # Each yield hands one value to the consumer, then execution resumes here.
        yield current
        current += 1
for i in generate_range(10):
print(f"i: {i}")
def natural_numbers():
    """Yield the natural numbers 1, 2, 3, ... without end."""
    value = 0
    while True:
        value += 1
        yield value
evens_below_20 = (i for i in generate_range(20) if i % 2 == 0)
# None of these computations *does* anything until we iterate
data = natural_numbers()
evens = (x for x in data if x % 2 == 0)
even_squares = (x ** 2 for x in evens)
even_squares_ending_in_six = (x for x in even_squares if x % 10 == 6)
# and so on
assert next(even_squares_ending_in_six) == 16
assert next(even_squares_ending_in_six) == 36
assert next(even_squares_ending_in_six) == 196
names = ["Alice", "Bob", "Charlie", "Debbie"]
# not Pythonic
for i in range(len(names)):
print(f"name {i} is {names[i]}")
# also not Pythonic
i = 0
for name in names:
print(f"name {i} is {names[i]}")
i += 1
# Pythonic
for i, name in enumerate(names):
print(f"name {i} is {name}")
import random
random.seed(10) # this ensures we get the same results every time
four_uniform_randoms = [random.random() for _ in range(4)]
# [0.5714025946899135, # random.random() produces numbers
# 0.4288890546751146, # uniformly between 0 and 1
# 0.5780913011344704, # it's the random function we'll use
# 0.20609823213950174] # most often
random.seed(10) # set the seed to 10
print(random.random()) # 0.57140259469
random.seed(10) # reset the seed to 10
print(random.random()) # 0.57140259469 again
random.randrange(10) # choose randomly from range(10) = [0, 1, ..., 9]
random.randrange(3, 6) # choose randomly from range(3, 6) = [3, 4, 5]
up_to_ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
random.shuffle(up_to_ten)
print(up_to_ten)
# [7, 2, 6, 8, 9, 4, 10, 1, 3, 5] (your results will probably be different)
my_best_friend = random.choice(["Alice", "Bob", "Charlie"]) # "Bob" for me
lottery_numbers = range(60)
winning_numbers = random.sample(lottery_numbers, 6) # [16, 36, 10, 6, 25, 9]
four_with_replacement = [random.choice(range(10)) for _ in range(4)]
print(four_with_replacement) # [9, 4, 4, 2]
import re
re_examples = [ # all of these are true, because
not re.match("a", "cat"), # 'cat' doesn't start with 'a'
re.search("a", "cat"), # 'cat' has an 'a' in it
not re.search("c", "dog"), # 'dog' doesn't have a 'c' in it
3 == len(re.split("[ab]", "carbs")), # split on a or b to ['c','r','s']
"R-D-" == re.sub("[0-9]", "-", "R2D2") # replace digits with dashes
]
assert all(re_examples), "all the regex examples should be True"
list1 = ['a', 'b', 'c']
list2 = [1, 2, 3]
# zip is lazy, so you have to do something like the following
[pair for pair in zip(list1, list2)] # is [('a', 1), ('b', 2), ('c', 3)]
assert [pair for pair in zip(list1, list2)] == [('a', 1), ('b', 2), ('c', 3)]
pairs = [('a', 1), ('b', 2), ('c', 3)]
letters, numbers = zip(*pairs)
letters, numbers = zip(('a', 1), ('b', 2), ('c', 3))
def add(a, b): return a + b
add(1, 2) # returns 3
try:
add([1, 2])
except TypeError:
print("add expects two inputs")
add(*[1, 2]) # returns 3
def doubler(f):
    """Wrap the one-argument function *f* so calls return double its result."""
    def doubled(x):
        # Closure: keeps a reference to the wrapped function f.
        return f(x) * 2
    return doubled
def f1(x):
return x + 1
g = doubler(f1)
assert g(3) == 8, "(3 + 1) * 2 should equal 8"
assert g(-1) == 0, "(-1 + 1) * 2 should equal 0"
def f2(x, y):
return x + y
g = doubler(f2)
try:
g(1, 2)
except TypeError:
print("as defined, g only takes one argument")
def magic(*args, **kwargs):
print("unnamed args:", args)
print("keyword args:", kwargs)
magic(1, 2, key="word", key2="word2")
# prints
# unnamed args: (1, 2)
# keyword args: {'key': 'word', 'key2': 'word2'}
def other_way_magic(x, y, z):
return x + y + z
x_y_list = [1, 2]
z_dict = {"z": 3}
assert other_way_magic(*x_y_list, **z_dict) == 6, "1 + 2 + 3 should be 6"
def doubler_correct(f):
    """Wrap *f* so any call returns twice f's result, whatever f's signature."""
    def wrapper(*args, **kwargs):
        # Forward every positional and keyword argument untouched.
        return f(*args, **kwargs) * 2
    return wrapper
g = doubler_correct(f2)
assert g(1, 2) == 6, "doubler should work now"
def add(a, b):
return a + b
assert add(10, 5) == 15, "+ is valid for numbers"
assert add([1, 2], [3]) == [1, 2, 3], "+ is valid for lists"
assert add("hi ", "there") == "hi there", "+ is valid for strings"
try:
add(10, "five")
except TypeError:
print("cannot add an int to a string")
def add(a: int, b: int) -> int:
return a + b
add(10, 5) # you'd like this to be OK
add("hi ", "there") # you'd like this to be not OK
# This is not in the book, but it's needed
# to make the `dot_product` stubs not error out.
from typing import List
Vector = List[float]
def dot_product(x, y): ...
# we have not yet defined Vector, but imagine we had
def dot_product(x: Vector, y: Vector) -> float: ...
from typing import Union
def secretly_ugly_function(value, operation): ...
def ugly_function(value: int, operation: Union[str, int, float, bool]) -> int:
...
def total(xs: list) -> float:
return sum(xs)
from typing import List # note capital L
def total(xs: List[float]) -> float:
return sum(xs)
# This is how to type-annotate variables when you define them.
# But this is unnecessary; it's "obvious" x is an int.
x: int = 5
values = [] # what's my type?
best_so_far = None # what's my type?
from typing import Optional
values: List[int] = []
best_so_far: Optional[float] = None # allowed to be either a float or None
lazy = True
# the type annotations in this snippet are all unnecessary
from typing import Dict, Iterable, Tuple
# keys are strings, values are ints
counts: Dict[str, int] = {'data': 1, 'science': 2}
# lists and generators are both iterable
if lazy:
evens: Iterable[int] = (x for x in range(10) if x % 2 == 0)
else:
evens = [0, 2, 4, 6, 8]
# tuples specify a type for each element
triple: Tuple[int, float, int] = (10, 2.3, 5)
from typing import Callable
# The type hint says that repeater is a function that takes
# two arguments, a string and an int, and returns a string.
def twice(repeater: Callable[[str, int], str], s: str) -> str:
    """Apply *repeater* to (s, 2) and return its result."""
    repeat_count = 2
    return repeater(s, repeat_count)
def comma_repeater(s: str, n: int) -> str:
    """Return *s* repeated *n* times, joined by a comma and space."""
    return ', '.join([s] * n)
assert twice(comma_repeater, "type hints") == "type hints, type hints"
Number = int
Numbers = List[Number]
def total(xs: Numbers) -> Number:
return sum(xs)
| 0.00478 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class Invoice(osv.Model):
    _inherit = 'account.invoice'

    def invoice_validate(self, cr, uid, ids, context=None):
        """Block validation of customer invoices/refunds whose lines exceed
        the allowed discount, unless the user belongs to the unrestricted
        discount group.

        The limit for a line is the smallest of the pricelist limit and the
        product-category limit (when each is active); if neither applies,
        the company-wide limit (when enabled) is used instead.

        :raises osv.except_osv: when a line's discount exceeds its limit.
        """
        # Users in this group may grant any discount; skip all checks.
        if not self.pool.get('res.users').has_group(cr, uid,
            'sale_max_discount.group_sale_max_discount'):
            for invoice in self.browse(cr, uid, ids, context=context):
                if invoice.type in ('out_invoice', 'out_refund'):
                    pricelist = invoice.pricelist_id
                    if pricelist and pricelist.discount_active:
                        max_discount = [pricelist.max_discount]
                    else:
                        max_discount = []
                    for line in invoice.invoice_line:
                        # Bug fix: take a copy of the pricelist limits.
                        # The original aliased the list (line_discount =
                        # max_discount), so appending one line's category
                        # limit leaked into every later line of the same
                        # invoice.
                        line_discount = list(max_discount)
                        if line.product_id:
                            if line.product_id.categ_id.discount_active:
                                line_discount.append(line.product_id.categ_id.max_discount)
                        if line_discount:
                            lower_discount = min(line_discount)
                            if line.discount and line.discount > lower_discount:
                                raise osv.except_osv(_('Discount Error'),
                                    _('The maximum discount for %s is %s.') % (line.name, lower_discount))
                        else:
                            # No pricelist/category limit: fall back to the
                            # company-wide limit of the logged-in user.
                            company = self.pool.get('res.users').browse(cr, uid, uid).company_id
                            if company.limit_discount_active:
                                max_company_discount = company.limit_discount_max_discount
                                if line.discount > max_company_discount:
                                    raise osv.except_osv(_('Discount Error'), _('The discount for %s exceeds the'
                                        ' company\'s discount limit.') % line.name)
        return super(Invoice, self).invoice_validate(cr, uid, ids, context=context)
| 0.005231 |
#!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import cPickle
import sys
from matplotlib.gridspec import GridSpec
from matplotlib.pyplot import xticks, figure, subplot, savefig, show, tight_layout, subplots_adjust
import numpy as np
from HPOlib.Plotting import plot_util
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__license__ = "3-clause BSD License"
__contact__ = "automl.org"
def plot_box_whisker(best_trials, name_list, title="", save="", y_min=0,
                     y_max=0):
    """Draw one box-and-whisker per experiment over its best trial results.

    Args:
        best_trials: list (one entry per experiment) of lists of best values.
        name_list: experiment names, used as x-tick labels; parallel to
            best_trials.
        title: optional figure supertitle.
        save: when non-empty, path the figure is saved to instead of shown.
        y_min, y_max: explicit y-axis limits; when equal (e.g. both 0), the
            limits are derived from the data with a 10% margin.

    NOTE(review): Python 2 only (print statement, sys.maxint).
    """
    ratio = 5
    gs = GridSpec(ratio, 1)
    fig = figure(1, dpi=100)
    fig.suptitle(title)
    ax = subplot(gs[0:ratio, :])
    ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
    # Black boxes ('k'), outliers drawn as black circles ('ok').
    bp = ax.boxplot(best_trials, 0, 'ok')
    boxlines = bp['boxes']
    for line in boxlines:
        line.set_color('k')
        line.set_linewidth(2)
    min_y = sys.maxint
    max_y = -sys.maxint
    # Get medians and limits
    # Python 2: range() returns a list, used here as a pre-sized buffer.
    medians = range(len(name_list))
    for i in range(len(name_list)):
        # Each median artist is a 2-point line; its y-value is the median.
        med = bp['medians'][i]
        median_x = []
        median_y = []
        for j in range(2):
            median_x.append(med.get_xdata()[j])
            median_y.append(med.get_ydata()[j])
        medians[i] = median_y[0]
        if min(best_trials[i]) < min_y:
            min_y = min(best_trials[i])
        if max(best_trials[i]) > max_y:
            max_y = max(best_trials[i])
    # Console output of the computed medians (Python 2 print statement).
    print medians
    # Plot xticks
    xticks(range(1, len(name_list)+1), name_list)
    # Set limits
    if y_max == y_min:
        # Set axes limit
        ax.set_ylim([min_y-0.1*abs((max_y-min_y)), max_y+0.1*abs((max_y-min_y))])
    else:
        ax.set_ylim([y_min, y_max])
        max_y = y_max
        min_y = y_min
    # Print medians as upper labels
    top = max_y-((max_y-min_y)*0.05)
    pos = np.arange(len(name_list))+1
    upper_labels = [str(np.round(s, 5)) for s in medians]
    # Format the label row as "median=[a, b, ..., z]" spread across columns.
    upper_labels[0] = "median=[%s," % upper_labels[0]
    for i in range(len(upper_labels[1:-1])):
        upper_labels[i+1] = "%s," % upper_labels[i+1]
    upper_labels[-1] = "%s]" % upper_labels[-1]
    for tick, label in zip(range(len(name_list)), ax.get_xticklabels()):
        ax.text(pos[tick], top, upper_labels[tick],
                horizontalalignment='center', size='x-small')
    ax.set_ylabel('Minfunction')
    tight_layout()
    subplots_adjust(top=0.85)
    if save != "":
        savefig(save, dpi=100, facecolor='w', edgecolor='w',
                orientation='portrait', papertype=None, format=None,
                transparent=False, bbox_inches="tight", pad_inches=0.1)
    else:
        show()
def main(pkl_list, name_list, title="", save="", y_min=0, y_max=0, cut=sys.maxint):
    """Load the best trial value from each experiment pickle and plot them.

    Args:
        pkl_list: list (one per experiment) of lists of pickle file paths.
        name_list: experiment names, parallel to pkl_list.
        title, save, y_min, y_max: forwarded to plot_box_whisker.
        cut: consider only the first `cut` trials of each experiment.
    """
    best_trials = list()
    for i in range(len(name_list)):
        best_trials.append(list())
        for pkl in pkl_list[i]:
            # One best value per pickle file for experiment i.
            fh = open(pkl, "r")
            trials = cPickle.load(fh)
            fh.close()
            best_trials[i].append(plot_util.get_best(trials, cut=cut))
    plot_box_whisker(best_trials=best_trials, name_list=name_list,
                     title=title, save=save, y_min=y_min, y_max=y_max)
    if save != "":
        sys.stdout.write("Saving plot to " + save + "\n")
    else:
        sys.stdout.write("..Done\n")
if __name__ == "__main__":
    prog = "python plotBoxWhisker.py WhatIsThis <ManyPickles> [WhatIsThis <ManyPickles>]"
    description = "Plot a Box whisker plot for many experiments. The box covers lower to upper quartile."
    parser = ArgumentParser(description=description, prog=prog)
    # General Options
    parser.add_argument("-t", "--title", dest="title", default="",
                        help="Optional supertitle for plot")
    parser.add_argument("--max", dest="max", default=0,
                        type=float, help="Maximum of the plot")
    parser.add_argument("--min", dest="min", default=0,
                        type=float, help="Minimum of the plot")
    parser.add_argument("-s", "--save", dest="save", default="",
                        help="Where to save plot instead of showing it?")
    parser.add_argument("-c", "--cut", default=sys.maxint, type=int,
                        help="Cut experiment pickles after a specified number of trials.")
    args, unknown = parser.parse_known_args()
    sys.stdout.write("\nFound " + str(len(unknown)) + " arguments...")
    # The leftover positional args are alternating name / pickle-path groups.
    pkl_list_main, name_list_main = plot_util.get_pkl_and_name_list(unknown)
    main(pkl_list=pkl_list_main, name_list=name_list_main, title=args.title, save=args.save,
         y_min=args.min, y_max=args.max, cut=args.cut)
| 0.001852 |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import csv
import re
import requests
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from candidates.models import PartySet, OrganizationExtra
from popolo import models as popolo_models
from popolo.importers.popit import PopItImporter
def fix_whitespace(s):
    """Trim *s* and collapse every internal whitespace run to one space."""
    # (?ms): multiline + dot-all, so newlines inside s are collapsed too.
    return re.sub(r'(?ms)\s+', ' ', s.strip())
"""
Takes an argument of a CSV file which should have 3 columns:
1: Name of party
2: ID of party (optional)
3: Comma separated list of Cantons the party is standing in
The ID is the "Cédula Jurídica" and is added as an Identifier and
used as the slug, otherwise we fall back to the slugified name.
It expects the CSV file to have NO header row.
It will create, or update, a party for each row in the CSV and
a party set for each Canton, adding the party to the party sets
for the Cantons in which the party is standing.
"""
class Command(BaseCommand):
    """Create/update parties and canton party sets from a headerless
    3-column CSV: party name, "Cedula Juridica" id (optional), and a
    comma-separated list of cantons the party is standing in."""
    help = 'Create or update parties from a CSV file'
    def add_arguments(self, parser):
        # Single positional argument: path to the CSV file to import.
        parser.add_argument('CSV-FILENAME')
    def add_id(self, party, party_id):
        """Attach the official "Cedula Juridica" id to the party."""
        party.identifiers.update_or_create(
            scheme='cedula-juridica',
            defaults={'identifier': party_id}
        )
    def generate_party_sets(self):
        """Pre-create one party set per Costa Rican canton listed by MapIt."""
        mapit_url = 'http://international.mapit.mysociety.org/areas/CRCANTON'
        cantons = requests.get(mapit_url).json()
        for canton_id, canton_data in cantons.items():
            name = canton_data['name']
            self.get_party_set(name)
    def update_party(self, party_data):
        """Create or update one party from a CSV row and attach it to the
        party set of every canton it is standing in."""
        # strip any bracketed information from the end of the name
        # as it's information about party initials or canton
        name = re.search(
            r'^([^(]*)\(?',
            fix_whitespace(party_data[0])
        ).group(1).decode('utf-8')  # assumes Python 2 byte strings from csv — TODO confirm
        party_id = fix_whitespace(party_data[1])
        # remove the (13/81) information text from the end of
        # the canton list.
        canton_list = re.search(
            r'^([^(]*)\(?',
            fix_whitespace(party_data[2])
        ).group(1)
        cantons = canton_list.split(',')
        # if posible we should use the official id but failing that fall
        # back to a slugified name
        if party_id != '':
            slug = party_id
        else:
            slug = slugify(name)
        try:
            # slug should be consistent and not have any
            # encoding issues
            org_extra = OrganizationExtra.objects.get(
                slug=slug
            )
            org = org_extra.base
            print("found existing party {0}".format(name))
        except OrganizationExtra.DoesNotExist:
            # Unknown slug: create both the Organization and its extra row.
            org = popolo_models.Organization.objects.create(
                name=name,
                classification='Party'
            )
            OrganizationExtra.objects.create(
                base=org, slug=slug
            )
            print("created new party {0}".format(name))
        if party_id != '':
            self.add_id(org, party_id)
        for canton in cantons:
            canton = canton.decode('utf-8')  # Python 2 bytes -> unicode — TODO confirm
            party_set = self.get_party_set(canton)
            # Only add the membership once; filter() avoids duplicates.
            if not org.party_sets.filter(slug=party_set.slug):
                print("adding party set {0}".format(party_set.slug))
                org.party_sets.add(party_set)
    def get_party_set(self, canton_name):
        """Return (creating on first use) the 2016 PartySet for a canton."""
        canton = fix_whitespace(canton_name)
        party_set_slug = "2016_canton_{0}".format(slugify(canton))
        party_set_name = "2016 parties in {0} Canton".format(canton)
        try:
            return PartySet.objects.get(slug=party_set_slug)
        except PartySet.DoesNotExist:
            self.stdout.write("Couldn't find the party set '{0}'".format(
                party_set_slug
            ))
            return PartySet.objects.create(
                slug=party_set_slug, name=party_set_name
            )
    def handle(self, **options):
        """Entry point: build the canton party sets, then import each row."""
        # NOTE(review): self.importer is never used later in this command —
        # confirm whether the PopItImporter is still needed.
        self.importer = PopItImporter()
        self.generate_party_sets()
        with open(options['CSV-FILENAME']) as f:
            csv_reader = csv.reader(f)
            for party_data in csv_reader:
                self.update_party(party_data)
| 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Here is a wrapper for the *unreleased* electric objects API.
Built by
• Harper Reed (harper@nata2.org) - @harper
• Gary Boone (gary.boone@gmail.com) - github.com/GaryBoone
The Electric Objects API is not yet supported by Electric Objects. It may change or
stop working at any time.
See the __main__ below for example API calls.
As configured, this module will display a random image from the favorites you marked on
electricobjects.com.
To use as is, you need to set your electricobjects.com login credentials. See the
get_credentials() function for how to do so.
Randomized images are picked among the first 200 images shown on your favorites page on
electricobjects.com. Change MAX_FAVORITES_FOR_DISPLAY below to adjust this limit.
Usage: $ python eo.py
Written for Python 2.7.x.
"""
import eo_api
import logging
import logging.handlers
import os
import random
import requests
from scheduler import Scheduler
import sys
CREDENTIALS_FILE = ".credentials"
USER_ENV_VAR = "EO_USER"
PASSWORD_ENV_VAR = "EO_PASS"
LOG_FILENAME = 'eo-python.log'
LOG_SIZE = 1000000 # bytes
LOG_NUM = 5 # number of rotating logs to keep
SCHEDULE = ["7:02", "12:02", "17:02", "22:02"] # 24-hour time format
SCHEDULE_JITTER = 10 # in minutes
# The maximum number of favorites to consider for randomly displaying one.
MAX_FAVORITES_FOR_DISPLAY = 200
# The number of favorites to pull per request.
NUM_FAVORITES_PER_REQUEST = 30
class ElectricObject(object):
"""The ElectricObject class provides functions for the Electric Objects EO1."""
    def __init__(self, username, password):
        """Create an API client bound to the given electricobjects.com credentials."""
        self.api = eo_api.EO_API(username, password)
        # Logger namespaced as "eo.ElectricObject" for per-class filtering.
        self.logger = logging.getLogger(".".join(["eo", self.__class__.__name__]))
    def user(self):
        """Fetch the signed-in user's account information (GET /user)."""
        return self.api.make_request("user", method="GET")
    def favorite(self, media_id):
        """Mark the media item *media_id* as a favorite (PUT /favorited/<id>)."""
        return self.api.make_request("favorited", method="PUT", path_append=media_id)
    def unfavorite(self, media_id):
        """Remove the favorite mark from *media_id* (DELETE /favorited/<id>)."""
        return self.api.make_request("favorited", method="DELETE", path_append=media_id)
    def display(self, media_id):
        """Show the media item *media_id* on the device (PUT /displayed/<id>)."""
        return self.api.make_request("displayed", method="PUT", path_append=media_id)
def favorites(self):
"""Return the user's list of favorites in JSON else [].
Returns:
An array of up to NUM_FAVORITES_PER_REQUEST favorites in JSON format
or else an empty list.
"""
offset = 0
favorites = []
while True:
params = {
"limit": NUM_FAVORITES_PER_REQUEST,
"offset": offset
}
result_JSON = self.api.make_request("favorited", method="GET", params=params,
parse_json=True)
if not result_JSON:
break
favorites.extend(result_JSON)
if len(result_JSON) < NUM_FAVORITES_PER_REQUEST: # last page
break
if len(favorites) > MAX_FAVORITES_FOR_DISPLAY: # too many
favorites = favorites[:MAX_FAVORITES_FOR_DISPLAY]
break
offset += NUM_FAVORITES_PER_REQUEST
return favorites
def devices(self):
"""Return a list of devices in JSON format, else None."""
return self.api.make_request("devices", method="GET", parse_json=True)
def choose_random_item(self, items, excluded_id=None):
"""Return a random item, avoiding the one with the excluded_id, if given.
Args:
items: a list of Electric Objects artwork objects.
Returns:
An artwork item, which could have the excluded_id if there's only one choice,
or [] if the list is empty.
"""
if not items:
return []
if len(items) == 1:
return items[0]
if excluded_id:
items = [item for item in items if item["artwork"]["id"] != excluded_id]
return random.choice(items)
def current_artwork_id(self, device_json):
"""Return the id of the artwork currently displayed on the given device.
Args:
device_json: The JSON describing the state of a device.
Returns:
An artwork id or 0 if the id isn't present in the device_json.
"""
if not device_json:
return 0
id = 0
try:
id = device_json["reproduction"]["artwork"]["id"]
except KeyError as e:
self.logger.error("problem parsing device JSON. Missing key: {0}".format(e))
return id
def display_random_favorite(self):
"""Retrieve the user's favorites and display one of them randomly on the first device
associated with the signed-in user.
Note that at present, only the first 20 favorites are returned by the API.
A truely random choice could be the one already displayed. To avoid that, first
request the displayed image and remove it from the favorites list, if present.
Note:
This function works on the first device if there are multiple devices associated
with the given user.
Returns:
The id of the displayed favorite, else 0.
"""
devs = self.devices()
if not devs:
self.logger.error("in display_random_favorite: no devices returned.")
return 0
device_index = 0 # First device of user.
current_image_id = self.current_artwork_id(devs[device_index])
favs = self.favorites()
if favs == []:
return 0
fav_item = self.choose_random_item(favs, current_image_id)
if not fav_item:
return 0
fav_id = fav_item["artwork"]["id"]
res = self.display(str(fav_id))
return fav_id if res else 0
def set_url(self, url):
"""Display the given URL on the first device associated with the signed-in user.
Return True on success.
"""
devs = self.devices()
if not devs:
self.logger.error("in set_url: no devices returned.")
return 0
device_index = 0 # First device of user.
device_id = devs[device_index]["id"]
request_url = self.api.base_url + "set_url"
params = {
"device_id": device_id,
"custom_url": url
}
response = self.api.net.post_with_authenticity(request_url, params)
return response.status_code == requests.codes.ok
def get_credentials():
    """Returns the electricobjects.com username and password.

    They can be set here in the code, in environment variables (USER_ENV_VAR /
    PASSWORD_ENV_VAR), or in a file named by CREDENTIALS_FILE.

    A simple way to set them in the environment variables is prefix your command
    with them. For example:
        $ EO_USER=you@example.com EO_PASS=pword python eo.py
    Don't forget to clear your command history if you don't want the credentials
    stored.

    This function allows us to avoid uploading credentials to GitHub. In addition
    to not writing them here, the credentials filename is included in the
    .gitignore file.

    The sources are read in the order of: default, then environment variables,
    then file. Each source overwrites the username and password separately, if
    set in that source.

    Returns:
        A dictionary with key/values for the username and password.
    """
    username = ""  # You can set them here if you don't plan to upload this code.
    password = ""
    username = os.environ.get(USER_ENV_VAR, username)
    password = os.environ.get(PASSWORD_ENV_VAR, password)
    try:
        with open(CREDENTIALS_FILE, "r") as f:
            username = next(f).strip()
            password = next(f).strip()
    # Only swallow the expected failures (no file, unreadable file, missing
    # lines); the original bare `except:` also hid KeyboardInterrupt/SystemExit.
    except (IOError, OSError, StopIteration):
        pass
    return {"username": username, "password": password}
def setup_logging():
    """Configure the "eo" logger to write to rotating files and to the console.

    Returns:
        The configured logger instance.
    """
    log_format = logging.Formatter('%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
    logger = logging.getLogger("eo")
    logger.setLevel(logging.INFO)
    file_handler = logging.handlers.RotatingFileHandler(
        LOG_FILENAME, maxBytes=LOG_SIZE, backupCount=LOG_NUM)
    console_handler = logging.StreamHandler()
    # File handler first, then console, matching the original registration order.
    for handler in (file_handler, console_handler):
        handler.setFormatter(log_format)
        logger.addHandler(handler)
    return logger
def show_a_new_favorite(eo):
    """Update the EO1 with a new, randomly selected favorite."""
    logger = logging.getLogger("eo")
    logger.info('Updating favorite')
    artwork_id = eo.display_random_favorite()
    if artwork_id:
        logger.info("Displayed artwork id " + str(artwork_id))
def demo(eo):
    """An example that displays a random favorite."""
    logger = logging.getLogger("eo")
    artwork_id = eo.display_random_favorite()
    if artwork_id:
        logger.info("Displayed artwork id " + str(artwork_id))
    # Other things to try (uncomment to experiment):
    #
    # Display an arbitrary URL. Flash-based pages do not work -- the EO1
    # display reads 'Missing Flash plugin', e.g.
    #     http://www.ustream.tv/channel/live-iss-stream/pop-out
    #
    # An autoplaying slideshow works:
    #     url = "http://theslideshow.net/#advanced/search-advanced-query=architectural+study" + \
    #           "&imageSize=Extra_Large"
    #
    # As does a single 1080x1920 image:
    #     url = "http://hd.highresolution-wallpapers.net/wallpapers/" + \
    #           "board_circuit_silicon_chip_technology_high_resolution_wallpapers-1080x1920.jpg"
    #     displayed = eo.set_url(url)
    #     if displayed:
    #         log("Displayed URL " + url)
    #
    # Mark a media item as a favorite, then unfavorite it:
    #     print eo.favorite("5626")
    #     print eo.unfavorite("5626")
    #
    # Display a media item by id:
    #     print eo.display("1136")
def main():
    """Entry point: display one favorite with --once, otherwise run on a schedule."""
    setup_logging()
    credentials = get_credentials()
    username = credentials["username"]
    password = credentials["password"]
    if username == "" or password == "":
        logger = logging.getLogger("eo")
        logger.error("The username or password are blank. See code for how to set them. Exiting.")
        exit()
    eo = ElectricObject(username=username, password=password)
    if len(sys.argv) > 1 and sys.argv[1] == "--once":
        show_a_new_favorite(eo)
        exit()
    scheduler = Scheduler(SCHEDULE, lambda: show_a_new_favorite(eo),
                          schedule_jitter=SCHEDULE_JITTER)
    scheduler.run()


if __name__ == "__main__":
    main()
| 0.003881 |
#!/usr/bin/env python
import struct
import time
import dns
from dnsdisttests import DNSDistTest
# Python 2 compatibility shim: rebind `range` to the lazy `xrange`.
# On Python 3, `xrange` does not exist (NameError) and `range` is already lazy.
try:
    range = xrange
except NameError:
    pass
class TestTCPKeepAlive(DNSDistTest):
    """
    These tests make sure that dnsdist keeps the TCP connection alive
    in various cases, like cache hits, self-generated answers, and
    that it doesn't in error cases (Drop, invalid queries...)
    """
    _tcpIdleTimeout = 20
    _maxTCPQueriesPerConn = 99
    _maxTCPConnsPerClient = 100
    _maxTCPConnDuration = 99
    _config_template = """
    newServer{address="127.0.0.1:%s"}
    setTCPRecvTimeout(%s)
    setMaxTCPQueriesPerConnection(%s)
    setMaxTCPConnectionsPerClient(%s)
    setMaxTCPConnectionDuration(%s)
    pc = newPacketCache(100, {maxTTL=86400, minTTL=1})
    getPool(""):setCache(pc)
    addAction("largernumberofconnections.tcpka.tests.powerdns.com.", SetSkipCacheAction())
    addAction("refused.tcpka.tests.powerdns.com.", RCodeAction(DNSRCode.REFUSED))
    addAction("dropped.tcpka.tests.powerdns.com.", DropAction())
    addResponseAction("dropped-response.tcpka.tests.powerdns.com.", DropResponseAction())
    -- create the pool named "nosuchpool"
    getPool("nosuchpool")
    addAction("nodownstream-servfail.tcpka.tests.powerdns.com.", PoolAction("nosuchpool"))
    setServFailWhenNoServer(true)
    """
    _config_params = ['_testServerPort', '_tcpIdleTimeout', '_maxTCPQueriesPerConn', '_maxTCPConnsPerClient', '_maxTCPConnDuration']

    def _countResponsesOverConnection(self, conn, query, expectedResponse=None, attempts=5):
        """Send `query` over `conn` up to `attempts` times and return how many
        responses were received.

        Stops early once the connection has been closed by dnsdist (recv
        returns None). When `expectedResponse` is given, every received
        response must be equal to it.
        """
        count = 0
        for _ in range(attempts):
            try:
                self.sendTCPQueryOverConnection(conn, query)
                response = self.recvTCPResponseOverConnection(conn)
                if response is None:
                    break
                if expectedResponse is not None:
                    self.assertEqual(expectedResponse, response)
                count = count + 1
            except:
                pass
        return count

    def testTCPKaSelfGenerated(self):
        """
        TCP KeepAlive: Self-generated answer
        """
        name = 'refused.tcpka.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        query.flags &= ~dns.flags.RD
        expectedResponse = dns.message.make_response(query)
        expectedResponse.set_rcode(dns.rcode.REFUSED)
        conn = self.openTCPConnection()
        count = self._countResponsesOverConnection(conn, query, expectedResponse)
        conn.close()
        # The connection must have stayed open for all five exchanges.
        self.assertEqual(count, 5)

    def testTCPKaCacheHit(self):
        """
        TCP KeepAlive: Cache Hit
        """
        name = 'cachehit.tcpka.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        expectedResponse = dns.message.make_response(query)
        rrset = dns.rrset.from_text(name,
                                    3600,
                                    dns.rdataclass.IN,
                                    dns.rdatatype.A,
                                    '192.0.2.1')
        expectedResponse.answer.append(rrset)
        # first query to fill the cache
        (receivedQuery, receivedResponse) = self.sendTCPQuery(query, expectedResponse)
        self.assertTrue(receivedQuery)
        self.assertTrue(receivedResponse)
        receivedQuery.id = query.id
        self.assertEqual(query, receivedQuery)
        self.assertEqual(receivedResponse, expectedResponse)
        # subsequent queries are served from the cache over one connection
        conn = self.openTCPConnection()
        count = self._countResponsesOverConnection(conn, query, expectedResponse)
        conn.close()
        self.assertEqual(count, 5)

    def testTCPKaNoDownstreamServFail(self):
        """
        TCP KeepAlive: No downstream ServFail

        The query is routed to a pool that has no server,
        and dnsdist is configured to send a ServFail when
        that happens. We should keep the TCP connection open.
        """
        name = 'nodownstream-servfail.tcpka.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        expectedResponse = dns.message.make_response(query)
        expectedResponse.set_rcode(dns.rcode.SERVFAIL)
        conn = self.openTCPConnection()
        count = self._countResponsesOverConnection(conn, query, expectedResponse)
        conn.close()
        self.assertEqual(count, 5)

    def testTCPKaQRBitSet(self):
        """
        TCP KeepAlive: QR bit set in question

        An invalid query (QR already set) should get no answer and the
        connection should be closed.
        """
        name = 'qrset.tcpka.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        query.flags |= dns.flags.QR
        conn = self.openTCPConnection()
        count = self._countResponsesOverConnection(conn, query)
        conn.close()
        self.assertEqual(count, 0)

    def testTCPKaDrop(self):
        """
        TCP KeepAlive: Drop
        """
        name = 'dropped.tcpka.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        # NOTE(review): setting QR here mirrors the QR-bit test above; the
        # DropAction on this name should drop the query either way -- confirm
        # whether the flag was intended.
        query.flags |= dns.flags.QR
        conn = self.openTCPConnection()
        count = self._countResponsesOverConnection(conn, query)
        conn.close()
        self.assertEqual(count, 0)

    def testTCPKaDropResponse(self):
        """
        TCP KeepAlive: Drop Response
        """
        name = 'dropped-response.tcpka.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        conn = self.openTCPConnection()
        count = self._countResponsesOverConnection(conn, query)
        conn.close()
        self.assertEqual(count, 0)

    def testTCPKaLargeNumberOfConnections(self):
        """
        TCP KeepAlive: Large number of connections
        """
        name = 'largernumberofconnections.tcpka.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        expectedResponse = dns.message.make_response(query)
        rrset = dns.rrset.from_text(name,
                                    3600,
                                    dns.rdataclass.IN,
                                    dns.rdatatype.A,
                                    '192.0.2.1')
        expectedResponse.answer.append(rrset)
        # number of connections
        numConns = 50
        # number of queries per connection
        numQueriesPerConn = 4
        conns = [self.openTCPConnection() for _ in range(numConns)]
        count = 0
        # round-robin the queries over all open connections
        for idx in range(numConns * numQueriesPerConn):
            try:
                conn = conns[idx % numConns]
                self.sendTCPQueryOverConnection(conn, query, response=expectedResponse)
                response = self.recvTCPResponseOverConnection(conn)
                if response is None:
                    break
                self.assertEqual(expectedResponse, response)
                count = count + 1
            except:
                pass
        # BUG FIX: the original loop read `for con in conns: conn.close()`,
        # closing the most recently used connection repeatedly and leaking
        # the other 49.
        for conn in conns:
            conn.close()
        self.assertEqual(count, numConns * numQueriesPerConn)
class TestTCPKeepAliveNoDownstreamDrop(DNSDistTest):
    """
    This test makes sure that dnsdist drops the TCP connection
    if no downstream server is available and setServFailWhenNoServer()
    is not set.
    """
    _tcpIdleTimeout = 20
    _maxTCPQueriesPerConn = 99
    _maxTCPConnsPerClient = 3
    _maxTCPConnDuration = 99
    _config_template = """
    newServer{address="127.0.0.1:%s"}
    setTCPRecvTimeout(%s)
    setMaxTCPQueriesPerConnection(%s)
    setMaxTCPConnectionsPerClient(%s)
    setMaxTCPConnectionDuration(%s)
    -- create the pool named "nosuchpool"
    getPool("nosuchpool")
    addAction("nodownstream-drop.tcpka.tests.powerdns.com.", PoolAction("nosuchpool"))
    """
    _config_params = ['_testServerPort', '_tcpIdleTimeout', '_maxTCPQueriesPerConn', '_maxTCPConnsPerClient', '_maxTCPConnDuration']

    def testTCPKaNoDownstreamDrop(self):
        """
        TCP KeepAlive: No downstream Drop

        The query is routed to a pool that has no server,
        and dnsdist is configured to drop the query when
        that happens. We should close the TCP connection right away.
        """
        name = 'nodownstream-drop.tcpka.tests.powerdns.com.'
        query = dns.message.make_query(name, 'A', 'IN')
        conn = self.openTCPConnection()
        answered = 0
        for _ in range(5):
            try:
                self.sendTCPQueryOverConnection(conn, query)
                if self.recvTCPResponseOverConnection(conn) is None:
                    break
                answered += 1
            except:
                pass
        conn.close()
        # No response must ever arrive: the connection gets dropped.
        self.assertEqual(answered, 0)
| 0.002118 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the accounts app.

    Adds four columns to accounts_dmuserprofile: high_school, awards,
    english_band_type and english_band_score. The `models` dict below is the
    frozen ORM snapshot South generated with this migration; by South
    convention it is left untouched.
    """

    def forwards(self, orm):
        """Apply the migration: add the four new DMUserProfile columns."""
        # Adding field 'DMUserProfile.high_school'
        db.add_column('accounts_dmuserprofile', 'high_school', self.gf('django.db.models.fields.CharField')(default=u'', max_length=32, blank=True), keep_default=False)

        # Adding field 'DMUserProfile.awards'
        db.add_column('accounts_dmuserprofile', 'awards', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)

        # Adding field 'DMUserProfile.english_band_type'
        db.add_column('accounts_dmuserprofile', 'english_band_type', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True), keep_default=False)

        # Adding field 'DMUserProfile.english_band_score'
        db.add_column('accounts_dmuserprofile', 'english_band_score', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True), keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the four columns added in forwards()."""
        # Deleting field 'DMUserProfile.high_school'
        db.delete_column('accounts_dmuserprofile', 'high_school')

        # Deleting field 'DMUserProfile.awards'
        db.delete_column('accounts_dmuserprofile', 'awards')

        # Deleting field 'DMUserProfile.english_band_type'
        db.delete_column('accounts_dmuserprofile', 'english_band_type')

        # Deleting field 'DMUserProfile.english_band_score'
        db.delete_column('accounts_dmuserprofile', 'english_band_score')

    models = {
        'accounts.dmuserprofile': {
            'Meta': {'object_name': 'DMUserProfile'},
            'awards': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'english_band_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'english_band_type': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'ethnic': ('django.db.models.fields.IntegerField', [], {}),
            'gender': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'high_school': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'join_date': ('django.db.models.fields.DateField', [], {}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'zh'", 'max_length': '5'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'major': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'nickname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
            'political': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
            'realname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'role': ('django.db.models.fields.IntegerField', [], {}),
            'sign_line': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '128', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['accounts']
| 0.007505 |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import json
import logging
from indra.statements import *
from indra.literature import id_lookup
from indra.databases import hgnc_client, uniprot_client, chebi_client
logger = logging.getLogger('index_card_assembler')

# Submitter identifier stamped on every generated index card.
global_submitter = 'cure'
class IndexCardAssembler(object):
    """Assembles INDRA Statements into MITRE index cards.

    Statements are collected first (constructor / add_statements) and turned
    into cards by make_model(); print_model()/save_model() serialize them.
    """

    def __init__(self, statements=None, pmc_override=None):
        """Optionally seed with statements and a PMC id overriding lookups."""
        self.statements = [] if statements is None else statements
        self.cards = []
        self.pmc_override = pmc_override

    def add_statements(self, statements):
        """Append further statements to be assembled."""
        self.statements.extend(statements)

    def make_model(self):
        """Build one index card per supported statement, skipping the rest."""
        for stmt in self.statements:
            # Order matters: each statement is matched against the first
            # applicable type, exactly as in the original dispatch chain.
            if isinstance(stmt, Modification):
                card = assemble_modification(stmt)
            elif isinstance(stmt, SelfModification):
                card = assemble_selfmodification(stmt)
            elif isinstance(stmt, Complex):
                card = assemble_complex(stmt)
            elif isinstance(stmt, Translocation):
                card = assemble_translocation(stmt)
            else:
                continue
            if card is None:
                continue
            if self.pmc_override is not None:
                card.card['pmc_id'] = self.pmc_override
            else:
                card.card['pmc_id'] = get_pmc_id(stmt)
            self.cards.append(card)

    def print_model(self):
        """Return the assembled cards serialized as a JSON string."""
        cards = [c.card for c in self.cards]
        # A single card is serialized bare rather than as a one-element list.
        if len(cards) == 1:
            cards = cards[0]
        return json.dumps(cards, indent=1)

    def save_model(self, file_name='index_cards.json'):
        """Write the JSON serialization of the cards to file_name."""
        with open(file_name, 'wt') as fh:
            fh.write(self.print_model())
class IndexCard(object):
    """A single index card, initialized to the empty skeleton payload."""

    def __init__(self):
        """Build the default card structure with all fields unset."""
        def blank_participant():
            # Each call returns a fresh dict so the two participants
            # can be mutated independently.
            return {
                'entity_type': None,
                'entity_text': None,
                'identifier': None
                }
        self.card = {
            'pmc_id': None,
            'submitter': None,
            'interaction': {
                'negative_information': False,
                'interaction_type': None,
                'participant_a': blank_participant(),
                'participant_b': blank_participant()
                }
            }

    def get_string(self):
        """Serialize the card as a JSON string."""
        return json.dumps(self.card)
def assemble_complex(stmt):
    """Build a 'complexes_with' index card from a Complex statement."""
    card = IndexCard()
    card.card['submitter'] = global_submitter
    card.card['evidence'] = get_evidence_text(stmt)
    interaction = card.card['interaction']
    interaction['interaction_type'] = 'complexes_with'
    # A complex has a single participant slot holding all members.
    interaction.pop('participant_b', None)
    participant = interaction['participant_a']
    # NOTE: fill out entity_text
    participant['entity_type'] = 'complex'
    participant['entity_text'] = ['']
    participant.pop('identifier', None)
    participant['entities'] = [get_participant(member) for member in stmt.members]
    return card
def assemble_modification(stmt):
    """Build an index card from a (de)modification statement.

    Direct statements become a single adds/removes_modification card between
    the enzyme and substrate. Indirect statements are wrapped: the enzyme
    'increases' a nested (GENERIC adds_modification SUB) interaction.
    """
    card = IndexCard()
    card.card['submitter'] = global_submitter
    card.card['evidence'] = get_evidence_text(stmt)
    # E.g. 'Phosphorylation' -> 'phosphorylation',
    # 'Dephosphorylation' -> removes_modification of 'phosphorylation'.
    mod_type = stmt.__class__.__name__.lower()
    interaction = {}
    interaction['negative_information'] = False
    if mod_type.startswith('de'):
        interaction['interaction_type'] = 'removes_modification'
        mod_type = mod_type[2:]  # was recomputed from the class name; identical
    else:
        interaction['interaction_type'] = 'adds_modification'
    interaction['modifications'] = [{
        'feature_type': 'modification_feature',
        'modification_type': mod_type,
        }]
    if stmt.position is not None:
        interaction['modifications'][0]['location'] = int(stmt.position)
    if stmt.residue is not None:
        interaction['modifications'][0]['aa_code'] = stmt.residue
    # If the statement is direct or there is no enzyme
    if get_is_direct(stmt) or stmt.enz is None:
        interaction['participant_a'] = get_participant(stmt.enz)
        interaction['participant_b'] = get_participant(stmt.sub)
        card.card['interaction'] = interaction
    # If the statement is indirect, we generate a nested index card:
    # ENZ increases (GENERIC adds_modification SUB)
    # (the original comment had ENZ and SUB swapped relative to the code)
    else:
        interaction['participant_a'] = get_participant(None)
        interaction['participant_b'] = get_participant(stmt.sub)
        card.card['interaction']['interaction_type'] = 'increases'
        card.card['interaction']['negative_information'] = False
        card.card['interaction']['participant_a'] = get_participant(stmt.enz)
        card.card['interaction']['participant_b'] = interaction
    return card
def assemble_selfmodification(stmt):
    """Build an index card from a self-modification statement.

    Only *phosphorylation self-modifications are supported; other types
    return None.
    """
    card = IndexCard()
    card.card['submitter'] = global_submitter
    card.card['evidence'] = get_evidence_text(stmt)
    if not stmt.__class__.__name__.lower().endswith('phosphorylation'):
        return None
    modification = {
        'feature_type': 'modification_feature',
        'modification_type': 'phosphorylation',
        }
    if stmt.position is not None:
        modification['location'] = int(stmt.position)
    if stmt.residue is not None:
        modification['aa_code'] = stmt.residue
    interaction = {
        'negative_information': False,
        'interaction_type': 'adds_modification',
        'modifications': [modification],
        }
    # If the statement is direct or there is no enzyme, the enzyme acts on
    # itself; otherwise the default (empty) interaction is left on the card.
    if get_is_direct(stmt) or stmt.enz is None:
        interaction['participant_a'] = get_participant(stmt.enz)
        interaction['participant_b'] = get_participant(stmt.enz)
        card.card['interaction'] = interaction
    return card
def assemble_translocation(stmt):
    """Build a 'translocates' index card from a Translocation statement."""
    # Index cards don't allow missing to_location
    if stmt.to_location is None:
        return None
    card = IndexCard()
    card.card['submitter'] = global_submitter
    card.card['evidence'] = get_evidence_text(stmt)
    interaction = {
        'negative_information': False,
        'interaction_type': 'translocates',
        }
    if stmt.from_location is not None:
        interaction['from_location_text'] = stmt.from_location
        interaction['from_location_id'] = cellular_components.get(stmt.from_location)
    interaction['to_location_text'] = stmt.to_location
    interaction['to_location_id'] = cellular_components.get(stmt.to_location)
    # No explicit agent slot in the card format: a GENERIC participant_a
    # moves the statement's agent (participant_b).
    interaction['participant_a'] = get_participant(None)
    interaction['participant_b'] = get_participant(stmt.agent)
    card.card['interaction'] = interaction
    return card
def get_participant(agent):
    """Return an index card participant dict for an INDRA Agent.

    Grounding priority: HGNC (mapped to a UniProt mnemonic) > CHEBI (mapped to
    PubChem) > PFAM-DEF protein families > ungrounded protein. Bound,
    modification and mutation conditions on the Agent are exported as
    features / not_features.
    """
    # Handle missing Agent as generic protein
    if agent is None:
        participant = {
            'entity_text': [''],
            'entity_type': 'protein',
            'identifier': 'GENERIC'
        }
        return participant
    # The Agent is not missing
    # Prefer the literal text the reader extracted; fall back to the name.
    text_name = agent.db_refs.get('TEXT')
    if text_name is None:
        text_name = agent.name
    participant = {}
    participant['entity_text'] = [text_name]
    hgnc_id = agent.db_refs.get('HGNC')
    uniprot_id = agent.db_refs.get('UP')
    chebi_id = agent.db_refs.get('CHEBI')
    pfam_def_ids = agent.db_refs.get('PFAM-DEF')
    # If HGNC grounding is available, that is the first choice
    if hgnc_id:
        uniprot_id = hgnc_client.get_uniprot_id(hgnc_id)
    if uniprot_id:
        uniprot_mnemonic = str(uniprot_client.get_mnemonic(uniprot_id))
        participant['identifier'] = 'UNIPROT:%s' % uniprot_mnemonic
        participant['entity_type'] = 'protein'
    elif chebi_id:
        pubchem_id = chebi_client.get_pubchem_id(chebi_id)
        participant['identifier'] = 'PUBCHEM:%s' % pubchem_id
        participant['entity_type'] = 'chemical'
    elif pfam_def_ids:
        participant['entity_type'] = 'protein_family'
        participant['entities'] = []
        pfam_def_list = []
        # PFAM-DEF appears to be a |-separated list of DBNAME:ID pairs --
        # assumption based on the parsing below; TODO confirm against the
        # grounding source that produces this key.
        for p in pfam_def_ids.split('|'):
            dbname, dbid = p.split(':')
            pfam_def_list.append({dbname: dbid})
        for pdi in pfam_def_list:
            # TODO: handle non-uniprot protein IDs here
            uniprot_id = pdi.get('UP')
            if uniprot_id:
                entity_dict = {}
                uniprot_mnemonic = \
                    str(uniprot_client.get_mnemonic(uniprot_id))
                gene_name = uniprot_client.get_gene_name(uniprot_id)
                if gene_name is None:
                    gene_name = ""
                entity_dict['entity_text'] = [gene_name]
                entity_dict['identifier'] = 'UNIPROT:%s' % uniprot_mnemonic
                entity_dict['entity_type'] = 'protein'
                participant['entities'].append(entity_dict)
    else:
        # No usable grounding: emit an ungrounded protein participant.
        participant['identifier'] = ''
        participant['entity_type'] = 'protein'

    # Export agent state as (not_)features on the participant.
    features = []
    not_features = []
    # Binding features
    for bc in agent.bound_conditions:
        feature = {
            'feature_type': 'binding_feature',
            'bound_to': {
                # NOTE: get type and identifier for bound to protein
                'entity_type': 'protein',
                'entity_text': [bc.agent.name],
                'identifier': ''
            }
        }
        if bc.is_bound:
            features.append(feature)
        else:
            not_features.append(feature)
    # Modification features
    for mc in agent.mods:
        feature = {
            'feature_type': 'modification_feature',
            'modification_type': mc.mod_type.lower(),
        }
        if mc.position is not None:
            pos = int(mc.position)
            feature['location'] = pos
        if mc.residue is not None:
            feature['aa_code'] = mc.residue
        if mc.is_modified:
            features.append(feature)
        else:
            not_features.append(feature)
    # Mutation features (always positive features)
    for mc in agent.mutations:
        feature = {}
        feature['feature_type'] = 'mutation_feature'
        if mc.residue_from is not None:
            feature['from_aa'] = mc.residue_from
        if mc.residue_to is not None:
            feature['to_aa'] = mc.residue_to
        if mc.position is not None:
            pos = int(mc.position)
            feature['location'] = pos
        features.append(feature)
    if features:
        participant['features'] = features
    if not_features:
        participant['not_features'] = not_features
    return participant
def get_pmc_id(stmt):
    """Return a 'PMC'-prefixed PMCID string for the statement's evidence.

    Each evidence's PMID is looked up in turn; the last lookup wins. Returns
    '' when the lookup yields no PMCID.
    """
    pmc_id = ''
    for ev in stmt.evidence:
        pmc_id = id_lookup(ev.pmid, 'pmid')['pmcid']
        if pmc_id is None:
            pmc_id = ''
        elif not pmc_id.startswith('PMC'):
            pmc_id = 'PMC' + pmc_id
    return str(pmc_id)
def get_evidence_text(stmt):
    """Return the non-empty evidence sentences for the statement.

    When no evidence carries text, returns a one-element placeholder list
    naming the source APIs instead.
    """
    texts = [ev.text for ev in stmt.evidence if ev.text]
    if texts:
        return texts
    sources = list(set([ev.source_api for ev in stmt.evidence]))
    return ['Evidence text not available in source database: %s' %
            ', '.join(sources)]
def get_is_direct(stmt):
    """Return True if the statement is taken to be a direct interaction.

    Any single evidence marked direct makes the statement direct. Failing
    that, any evidence marked indirect makes it indirect. With no epistemic
    information at all, default to direct.
    """
    saw_indirect = False
    for ev in stmt.evidence:
        direct = ev.epistemics.get('direct')
        if direct is True:
            return True
        if direct is False:
            # At least one evidence explicitly marks the statement indirect.
            saw_indirect = True
    return not saw_indirect
| 0.001298 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.