"""Check CSV file structure for anomalies"""
from csv2shex.csvrow import CSVRow
def test_csvshape_validate_uristem_prefixed():
"""@@@"""
statement = CSVRow(
shapeID="@default",
propertyID="wdt:P31",
valueConstraint="wd:",
valueConstraintType="UriStem",
)
assert statement._validate_uristem()
def test_csvshape_validate_uristem_normal_uri():
"""@@@"""
statement = CSVRow(
shapeID="@default",
propertyID="wdt:P31",
valueConstraint="http://www.gmd.de/",
valueConstraintType="UriStem",
)
assert statement._validate_uristem()
def test_csvshape_validate_uristem_with_angle_brackets():
"""@@@"""
statement = CSVRow(
shapeID="@default",
propertyID="wdt:P31",
valueConstraint="<http://www.gmd.de/>",
valueConstraintType="UriStem",
)
statement._normalize_uristem()
assert statement._validate_uristem()
def test_csvshape_validate_uristem_colon_only():
"""@@@"""
statement = CSVRow(
shapeID="@default",
propertyID="wdt:P31",
valueConstraint=":",
valueConstraintType="UriStem",
)
statement._normalize_uristem()
assert statement._validate_uristem()
def test_csvshape_validate_uristem_not():
"""@@@"""
statement = CSVRow(
shapeID="@default",
propertyID="wdt:P31",
valueConstraint="foobar",
valueConstraintType="UriStem",
)
statement._normalize_uristem()
assert not statement._validate_uristem()
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest
import numpy as np
from pyscf import gto
from pyscf.nao import nao
mol = gto.M(
verbose = 1,
atom = '''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis = 'cc-pvdz',
)
class KnowValues(unittest.TestCase):
def test_ao_eval(self):
from pyscf.nao.m_ao_eval_libnao import ao_eval_libnao
from pyscf.nao.m_ao_eval import ao_eval
""" """
sv = nao(gto=mol)
ra = np.array([2.0333, 0.101, 2.333])
coords = np.array([[0.0333, 1.111, 3.333]])
ao_vals_lib = ao_eval_libnao(sv.ao_log, ra, 0, coords)
self.assertAlmostEqual(ao_vals_lib[0,0], 0.021725938009701302)
ao_vals_lib = ao_eval_libnao(sv.ao_log, ra, 1, coords)
self.assertAlmostEqual(ao_vals_lib[1,0], 0.0017709123325328384)
ra = 4.0*np.random.rand(3)
coords = 3.0*np.random.rand(10,3)
ao_vals_lib = ao_eval_libnao(sv.ao_log, ra, 0, coords)
ao_vals_py = ao_eval(sv.ao_log, ra, 0, coords)
for aocc1, aocc2 in zip(ao_vals_lib, ao_vals_py):
for ao1, ao2 in zip(aocc1, aocc2):
self.assertAlmostEqual(ao1, ao2)
if __name__ == "__main__": unittest.main()
|
from discord.ext import commands
class Base(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="me", hidden=True)
@commands.is_owner()
async def only(self, ctx):
await ctx.send(f"Hello {ctx.author.mention}")
@commands.command(name="hello", hidden=True)
async def hello(self, ctx):
await ctx.send(f"Hello {ctx.author.mention}")
@commands.group(
aliases=["e"], pass_context=True, invoke_without_command=True, hidden=True
)
@commands.is_owner()
async def ext(self, ctx):
cogs = list(self.bot.extensions.keys())
for index, cog in enumerate(cogs):
cogs[index] = cog.replace("scaffold.cogs.", "")
msg = "```fix\nLoaded Extensions:\n--------\n{}```".format(" ".join(cogs))
user = await self.bot.fetch_user(ctx.message.author.id)
await user.send(msg)
@ext.group(aliases=["unload"], pass_context=True)
@commands.is_owner()
async def unload_ext(self, ctx, *args):
self.bot.unload_extension("scaffold.cogs." + args[0])
@ext.group(aliases=["load"], pass_context=True)
@commands.is_owner()
async def load_ext(self, ctx, *args):
self.bot.load_extension("scaffold.cogs." + args[0])
def setup(bot):
bot.add_cog(Base(bot))
|
def can_make_word(word, blocks):
letters = [c for c in word]
def block_has_letter(block):
letter1, letter2 = block
return (letter1 in letters) or (letter2 in letters)
useful_blocks = [block for block in filter(block_has_letter, blocks)]
def selected_blocks(level, letters, blocks, tried_blocks, started_with_blocks):
if len(letters) == 0: # dbg_print("letters exhausted")
return True
elif len(blocks) == 0:
if len(tried_blocks) == 0: # dbg_print("blocks and tried blocks exhausted")
return False
elif set(tried_blocks) == set(started_with_blocks): # dbg_print("tried all blocks to no avail")
return False
else: # dbg_print(f"exhausted, reset to {tried_blocks}")
return selected_blocks(level, letters, tried_blocks, [], tried_blocks)
else:
block = blocks[0]
remaining_blocks = blocks[1:]
first_letter = letters[0]
remaining_letters = letters[1:]
if first_letter in block: # dbg_print(f"For {first_letter}, trying to select block {block}")
if selected_blocks(level + 1, remaining_letters, remaining_blocks + tried_blocks, [],
remaining_blocks + tried_blocks):
return True
else: # dbg_print(f"--did not work, trying without")
return selected_blocks(level + 1, letters, remaining_blocks, tried_blocks + [block],
started_with_blocks)
else: # dbg_print(f"For {first_letter}, skipping block {block} and trying next")
return selected_blocks(level, letters, remaining_blocks, tried_blocks + [block], started_with_blocks)
return selected_blocks(0, letters, useful_blocks, [], useful_blocks)
def main():
blocks = [
("B", "O"),
("X", "K"),
("D", "Q"),
("C", "P"),
("N", "A"),
("G", "T"),
("R", "E"),
("T", "G"),
("Q", "D"),
("F", "S"),
("J", "W"),
("H", "U"),
("V", "I"),
("A", "N"),
("O", "B"),
("E", "R"),
("F", "S"),
("L", "Y"),
("P", "C"),
("Z", "M"),
]
words = [("A", True),
("BARK", True),
("BOOK", False),
("TREAT", True),
("COMMON", False),
("SQUAD", True),
("CONFUSE", True)
]
for (word, expected_possible) in words:
is_possible = can_make_word(word, blocks)
print(f"Can make {word}: {is_possible}:", is_possible == expected_possible)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# @Time : 2020/8/21 20:04
# @WeChat Official Account: Python自动化办公社区
# @File : pdf_rd.py
# @Software: PyCharm
# @Description:
# pip install pdfminer3k
from io import StringIO
from pdfminer.pdfinterp import PDFResourceManager,process_pdf
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
# Open the PDF file
pdf_file = open('静夜思.pdf', 'rb')
######## default operations #######
rsrcmgr = PDFResourceManager()
retstr = StringIO()
laparams = LAParams()
device = TextConverter(rsrcmgr=rsrcmgr,outfp=retstr,laparams=laparams)
process_pdf(rsrcmgr=rsrcmgr,device=device,fp=pdf_file)
device.close()
content = retstr.getvalue()
retstr.close()
pdf_file.close()
######## default operations #######
print(content)
|
# was stanza.models.pos.model
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from biaffine import BiaffineScorer
from hlstm import HighwayLSTM
from dropout import WordDropout
from char_model import CharacterModel
class Tagger(nn.Module):
def __init__(self, args, vocab, emb_matrix=None):
super().__init__()
self.vocab = vocab
self.args = args
self.use_pretrained = emb_matrix is not None
self.use_char = args['char_emb_dim'] > 0
self.use_word = args['word_emb_dim'] > 0
self.share_hid = args['pos_emb_dim'] < 1
self.unsaved_modules = []
def add_unsaved_module(name, module):
self.unsaved_modules += [name]
setattr(self, name, module)
# input layers
input_size = 0
if self.use_word:
# frequent word embeddings
self.word_emb = nn.Embedding(len(vocab['word']), self.args['word_emb_dim'], padding_idx=0)
input_size += self.args['word_emb_dim']
if not self.share_hid:
# pos embeddings
self.pos_emb = nn.Embedding(len(vocab['pos']), self.args['pos_emb_dim'], padding_idx=0)
if self.use_char:
self.charmodel = CharacterModel(args, vocab, bidirectional=args['char_bidir'])
self.trans_char = nn.Linear(self.charmodel.num_dir * self.args['char_hidden_dim'], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
if self.use_pretrained:
# pretrained embeddings, by default this won't be saved into model file
add_unsaved_module('pretrained_emb', nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=True))
self.trans_pretrained = nn.Linear(emb_matrix.shape[1], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
# recurrent layers
self.taggerlstm = HighwayLSTM(input_size, self.args['tag_hidden_dim'], self.args['tag_num_layers'], batch_first=True, bidirectional=True, dropout=self.args['dropout'], rec_dropout=self.args['tag_rec_dropout'], highway_func=torch.tanh)
self.drop_replacement = nn.Parameter(torch.randn(input_size) / np.sqrt(input_size))
self.taggerlstm_h_init = nn.Parameter(torch.zeros(2 * self.args['tag_num_layers'], 1, self.args['tag_hidden_dim']))
self.taggerlstm_c_init = nn.Parameter(torch.zeros(2 * self.args['tag_num_layers'], 1, self.args['tag_hidden_dim']))
# classifiers
self.pos_hid = nn.Linear(self.args['tag_hidden_dim'] * 2, self.args['deep_biaff_hidden_dim'])
self.pos_clf = nn.Linear(self.args['deep_biaff_hidden_dim'], len(vocab['pos']))
self.pos_clf.weight.data.zero_()
self.pos_clf.bias.data.zero_()
if self.share_hid:
clf_constructor = lambda insize, outsize: nn.Linear(insize, outsize)
else:
self.feats_hid = nn.Linear(self.args['tag_hidden_dim'] * 2, self.args['composite_deep_biaff_hidden_dim'])
clf_constructor = lambda insize, outsize: BiaffineScorer(insize, self.args['pos_emb_dim'], outsize)
self.feats_clf = nn.ModuleList()
for l in vocab['feats'].lens():
if self.share_hid:
self.feats_clf.append(clf_constructor(self.args['deep_biaff_hidden_dim'], l))
self.feats_clf[-1].weight.data.zero_()
self.feats_clf[-1].bias.data.zero_()
else:
self.feats_clf.append(clf_constructor(self.args['composite_deep_biaff_hidden_dim'], l))
# criterion
self.crit = nn.CrossEntropyLoss(ignore_index=0) # ignore padding
self.drop = nn.Dropout(args['dropout'])
self.worddrop = WordDropout(args['word_dropout'])
def forward(self, word, word_mask, wordchars, wordchars_mask, pos, feats, pretrained, word_orig_idx, sentlens, wordlens):
def pack(x):
return pack_padded_sequence(x, sentlens, batch_first=True)
def get_batch_sizes(sentlens):
b = []
for i in range(max(sentlens)):
c = len([x for x in sentlens if x > i])
b.append(c)
return torch.tensor(b)
def pad(x):
return pad_packed_sequence(PackedSequence(x, batch_sizes), batch_first=True)[0]
inputs = []
if self.use_word:
word_emb = self.word_emb(word)
word_emb = pack(word_emb)
inputs += [word_emb]
batch_sizes = word_emb.batch_sizes
else:
batch_sizes = get_batch_sizes(sentlens)
if self.use_pretrained:
pretrained_emb = self.pretrained_emb(pretrained)
pretrained_emb = self.trans_pretrained(pretrained_emb)
pretrained_emb = pack(pretrained_emb)
inputs += [pretrained_emb]
if self.use_char:
char_reps = self.charmodel(wordchars, wordchars_mask, word_orig_idx, sentlens, wordlens)
char_reps = PackedSequence(self.trans_char(self.drop(char_reps.data)), char_reps.batch_sizes)
inputs += [char_reps]
lstm_inputs = torch.cat([x.data for x in inputs], 1)
lstm_inputs = self.worddrop(lstm_inputs, self.drop_replacement)
lstm_inputs = self.drop(lstm_inputs)
lstm_inputs = PackedSequence(lstm_inputs, inputs[0].batch_sizes)
lstm_outputs, _ = self.taggerlstm(lstm_inputs, sentlens, hx=(self.taggerlstm_h_init.expand(2 * self.args['tag_num_layers'], word.size(0), self.args['tag_hidden_dim']).contiguous(), self.taggerlstm_c_init.expand(2 * self.args['tag_num_layers'], word.size(0), self.args['tag_hidden_dim']).contiguous()))
lstm_outputs = lstm_outputs.data
pos_hid = F.relu(self.pos_hid(self.drop(lstm_outputs)))
pos_pred = self.pos_clf(self.drop(pos_hid))
preds = [pad(pos_pred).max(2)[1]]
pos = pack(pos).data
loss = self.crit(pos_pred.view(-1, pos_pred.size(-1)), pos.view(-1))
if self.share_hid:
feats_hid = pos_hid
clffunc = lambda clf, hid: clf(self.drop(hid))
else:
feats_hid = F.relu(self.feats_hid(self.drop(lstm_outputs)))
# TODO: self.training is never set, but check if this is a bug
#if self.training: pos_emb = self.pos_emb(pos) else:
pos_emb = self.pos_emb(pos_pred.max(1)[1])
clffunc = lambda clf, hid: clf(self.drop(hid), self.drop(pos_emb))
feats_preds = []
feats = pack(feats).data
for i in range(len(self.vocab['feats'])):
feats_pred = clffunc(self.feats_clf[i], feats_hid)
loss += self.crit(feats_pred.view(-1, feats_pred.size(-1)), feats[:, i].view(-1))
feats_preds.append(pad(feats_pred).max(2, keepdim=True)[1])
preds.append(torch.cat(feats_preds, 2))
return loss, preds
if __name__ == "__main__":
print("This file cannot be used on its own.")
print("To launch the tagger, use tagger.py instead of model.py")
|
import re
def match(command, settings):
return ('git' in command.script
and " is not a git command. See 'git --help'." in command.stderr
and 'Did you mean this?' in command.stderr)
def get_new_command(command, settings):
broken_cmd = re.findall(r"git: '([^']*)' is not a git command",
command.stderr)[0]
new_cmd = re.findall(r'Did you mean this\?\n\s*([^\n]*)',
command.stderr)[0]
return command.script.replace(broken_cmd, new_cmd, 1)
|
# Write a program that reads the name, age and sex of 4 people. At the end, show: the group's average age, the name of the oldest man, and how many women are under 20 years old.
print('='*60)
print('FORMULÁRIO'.center(60))
print('='*60)
soma = 0
homem_velho = 0
homem_velho_nome = ''
mulher_jovem = 0
for x in range (1,5):
print('{}ª PESSOA'.format(x).center(60))
print('-'* 60)
nome = input('Nome completo:')
idade = int(input('Idade:'))
print('''Informe o sexo conforme opções
[M] MASCULINO
[F] FEMININO''')
sexo = input('sua opção:').strip().upper()
# compute the group's average age
soma += idade
media = soma / x
if sexo == 'M' and homem_velho == 0:
homem_velho = idade
homem_velho_nome = nome
elif sexo == 'M' and homem_velho < idade:
homem_velho = idade
homem_velho_nome = nome
if sexo == 'F' and idade < 20:
mulher_jovem += 1
print('='*60)
print('RELATÓRIO'.center(60))
print('='*60)
print('O homem mais velho tem idade de {} anos e se chama {}'.format(homem_velho,homem_velho_nome))
print('Média de idade do grupo é de {} anos'.format(int(media)))
print('Existe {} mulheres com idade inferior a 20 anos'.format(mulher_jovem))
print('-'*60)
|
import ray
from agents.runners.actors.actor import Actor
from utils.environment import Environment
from utils.run_env import run_env
@ray.remote
class ImpalaActor(Actor):
def run(self, env_name, ps, global_buffer, epochs):
env = Environment(env_name)
print("actor start")
i = 0
while 1:
#for j in range():
weights = ray.get(ps.pull.remote())
self.brain.set_weights(weights)
run_env(env, self.brain, self.args['traj_length'], True)
data = self.brain.get_trajectories()
global_buffer.put_trajectories.remote(data)
print('actor finish')
|
import os
import sys
import django
import requests
import json
from urllib import request, parse
from bs4 import BeautifulSoup
import gscholar
webapp_path = os.path.join('/Users/your_name/pyref')
sys.path.append(webapp_path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'pyref.settings'
django.setup()
from refs.models import Ref, get_ref_from_doi
s = "Zhang, S. B."
words = s.split(' ')
string = []
# insert(-1, ...) puts each new word just before the last element, so the first
# token (the surname) stays at the end: "Zhang, S. B." -> "S. B. Zhang,"
for word in words:
    string.insert(-1, word)
print(" ".join(string))
|
import re
import time
import polishNotation2
print(polishNotation2)
print(
('''
hello
derp
hi
''').split("\n")
)
print(re.split("(==|=|''')", "'derp' '''hello''' 3 == \"3\""))
# This is how to tokenize a string using a list of symbols in Python (a small sketch follows below).
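# A minimal sketch of that idea (the symbol list and names below are illustrative):
# build one alternation pattern from the symbols, wrap it in a capture group so
# re.split keeps the delimiters, then drop the empty/whitespace-only pieces.
exampleSymbols = ["==", "=", "'''"]
examplePattern = "(" + "|".join(re.escape(s) for s in exampleSymbols) + ")"
print([t for t in re.split(examplePattern, "'derp' '''hello''' 3 == \"3\"") if t.strip()])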
def getMatchingRegex(theString):
pass
theRegexes = ['(?:regex|regular expression|regexp)', '(reversed|((?:written|spelled) (?:backwards|in reverse)))']
regexesToReturn = []
for current in theRegexes:
if re.compile(current).match(theString):
regexesToReturn += [current]
if(len(regexesToReturn) == 1):
return regexesToReturn[0]
else:
raise Exception("The regexes that match " + theString +" are "+ str(regexesToReturn))
def fixSyntax(theString):
if (("\\" in theString) or ("?:" in theString)):
return theString
else:
for current in [".", "?", "/", "}", "]", "{", "[", "-", "+", "*", "^", "%", "$", "#", "@", "!", "~", "`"]:
theString = theString.replace(current, "\\" + current)
theString = theString.replace("(", "(?:")
theString = list(filter(None, re.split(r"(<<[^\s]+>>)", theString)))
#print(theString)
for i in range(0, len(theString)):
current = theString[i]
if not current.startswith("<<"):
theString[i] = "(" + current + ")"
#print(theString)
theString = "".join(theString)
#print(theString)
return theString.replace(" )<<", ") <<").replace(">>( ", ">> (")
def makeSyntaxArrayFromString(theString):
theArray = theString.split("\n")
print(theArray)
print(fixSyntax("<<foo>> {}"))
print(fixSyntax("it is (false|(not |un)true) that <<foo>>"))
print(fixSyntax("(it is true that) <<foo>>"))
fixSyntax("<<foo>> (is between) <<bar>> and <<baz>>")
print(fixSyntax("<<foo>> (is (greater|more) than) <<bar>> ((and|but) less than) <<baz>>"))
'''
print{every match of the regular expression "(h(?:a|e)llo|halo)" in the string "hallo hello halo"}
print{the longest string in ["hello", "herp", "wha"]}
print{the longest match of the regular expression "(h(?:a|e)llo|halo)" in the string "hallo hello halo"}
'''
'''
for i in range(0, len(syntaxRules)):
current = syntaxRules[i]
if(len(current) == 3):
if(type(current[1]) == dict):
if("Python" in current[1].keys()):
print(current[1]["Python"])
for current1 in current[0]:
print(" "+current1)
else:
print(current[1])
for current1 in current[0]:
print(" "+current1)
'''
def makeSyntaxRules(syntaxRules):
for i in range (0, len(syntaxRules)):
#print(i)
if(type(syntaxRules[i][0]) == str):
syntaxRules[i][0] = [syntaxRules[i][0]]
current = syntaxRules[i]
for j in range(0, (len(syntaxRules[i][0]))):
syntaxRules[i][0][j] = fixSyntax(syntaxRules[i][0][j])
#print(current)
theOutput = current[1]
if(type(theOutput) == str and (len(current) > 2)):
syntaxRules[i][1] = {"Python":theOutput}
#raise Exception("In the list " + str(current) + ", " + theOutput + " is a string, but it should be a dictionary")
return syntaxRules
def testMacro(syntaxRules, theInput):
syntaxRules = makeSyntaxRules(syntaxRules)
pythonSyntaxArray = polishNotation2.makeReallyNewInfoArray(syntaxRules, "Python")
return polishNotation2.testMacro(theInput, pythonSyntaxArray)
#syntaxRules = makeSyntaxRules(syntaxRules))
print(10 ^ 3)  # note: ^ is bitwise XOR (prints 9), not exponentiation
|
from setuptools import setup
setup(
name='pylint-ci',
version='1.0.0',
packages=['pylint_ci'],
install_requires=['pylint'],
entry_points={
'console_scripts': ['pylint-ci = pylint_ci.__main__:main'],
},
)
|
from .models import *
from rest_framework import serializers
class BookSerializer(serializers.ModelSerializer):
class Meta:
model = Books
fields = '__all__'
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
|
import asyncio
import time
import random
import os
EXE = 1
# REPRESENTS A COOPERATIVE TASK (COROUTINE)
async def minhatarefa(n):
global count
print(f'Tarefa {n} começou')
t = random.randint(1,3)
await asyncio.sleep(t)
#time.sleep(t)
if EXE == 4 and n == 5:
os._exit(0) # Aborts the process immediately
count += 1
print(f'Tarefa {n} terminou')
# LAUNCH THE TASKS
async def main():
tarefas = []
for i in range(10) :
t = asyncio.create_task( minhatarefa(i) )
tarefas.append(t)
# wait for the concurrent tasks to finish
print('Tarefas lançadas ...')
for t in tarefas : await t
print('Pronto para continuar ...')
#--------------------------------------------------------------------------
# PARENT PROCESS
if __name__ == '__main__':
count = 0
start = time.time()
asyncio.run(main())
print("Tarefas lançadas! ", count)
print('Tempo decorrido', time.time() - start )
|
from django.forms import widgets, ModelForm, Form
from django import forms
from django.urls import reverse_lazy
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm, SetPasswordForm, \
PasswordChangeForm, UserCreationForm, UserChangeForm
from localflavor.br.validators import BRCPFValidator
from parsley.decorators import parsleyfy
from .models import Usuario, CarteiraCriptomoeda, Estrategia
class CustomUserCreationForm(UserCreationForm):
"""
Classe do formulário para criação de usuário pelo Django Admin
"""
class Meta:
model = Usuario
fields = ("email",)
class CustomUserChangeForm(UserChangeForm):
"""
Classe do formulário para alterar usuário pelo Django Admin
"""
class Meta:
model = Usuario
fields = '__all__'
@parsleyfy
class EstrategiaForm(ModelForm):
"""
Classe do formulário de cadastro do Usuário
"""
# Classe de Metadados
class Meta:
model = Estrategia
# Pegar os campos do model
fields = ['nome', 'template']
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
super(EstrategiaForm, self).__init__(*args, **kwargs)
# Adicionar a classe CSS 'form-control' para todos os campos do formulário
# exceto o campo 'usuario'
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
form_name = 'estrategia_form'
scope_prefix = 'dados_estrategia'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
templates = [('Compra e venda EMA 20', 'Compra e venda EMA 20'), \
('Compra e venda MACD', 'Compra e venda MACD'), ]
nome = forms.CharField(
label='Nome da Estratégia',
max_length=128,
required=True,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Nome da Estratégia",
}),
help_text='Digite o nome da estratégia'
)
template = forms.ChoiceField(
label='Modelo de estratégia',
required=True,
widget=widgets.Select(attrs={
'data-parsley-trigger':"focusin focusout",
}),
choices=templates,
help_text='Escolha o modelo de estratégia'
)
initial = {
'nome': '',
'template': '',
}
@parsleyfy
class ChangePasswordForm(PasswordChangeForm):
"""
Classe do formulário para alterar senha quando o usuário está logado
"""
# Classe de Metadados
class Meta:
fields = ['old_password', 'new_password1', 'new_password2']
parsley_extras = {
'new_password1': {
'remote': reverse_lazy('parsley-validar-senha'),
'remote-message': "Digite uma senha com números e caracteres"
},
'new_password2': {
'equalto': "new_password1",
'error-message': "As senhas digitadas não são iguais",
},
}
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
super(ChangePasswordForm, self).__init__(*args, **kwargs)
# Adicionar a classe CSS 'form-control' para todos os campos do formulário
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
form_name = 'reset-form'
scope_prefix = 'dados_reset'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
# Senha atual
old_password = forms.CharField(
label='Senha atual',
max_length=16,
required=True,
widget=widgets.PasswordInput(attrs={
'placeholder': "Senha atual",
'autocomplete': "off",
})
)
# Nova senha
new_password1 = forms.CharField(
label='Nova senha',
max_length=16,
min_length=8,
required=True,
widget=widgets.PasswordInput(attrs={
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "Senha",
'autocomplete': "off",
}),
error_messages={
'min_length': "O tamanho da senha é muito curta.",
}
)
# Confirmar a nova senha
new_password2 = forms.CharField(
label='Confirmar a nova senha',
max_length=16,
min_length=8,
required=True,
widget=widgets.PasswordInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Senha",
'autocomplete': "off",
}),
help_text='Digite a sua senha novamente',
error_messages={
'min_length': "O tamanho da senha é muito curta.",
}
)
initial = {
'old_password': '',
'new_password1': '',
'new_password2': '',
}
@parsleyfy
class ResetForm(SetPasswordForm):
"""
Classe do formulário para alterar senha
"""
# Classe de Metadados
class Meta:
fields = ['new_password1', 'new_password2']
parsley_extras = {
'new_password1': {
'remote': reverse_lazy('parsley-validar-senha'),
'remote-message': "Digite uma senha com números e caracteres"
},
'new_password2': {
'equalto': "new_password1",
'error-message': "As senhas digitadas não são iguais",
},
}
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
super(ResetForm, self).__init__(*args, **kwargs)
# Adicionar a classe CSS 'form-control' para todos os campos do formulário
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
form_name = 'reset-form'
scope_prefix = 'dados_reset'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
# Senha
new_password1 = forms.CharField(
label='Senha',
max_length=16,
min_length=8,
required=True,
widget=widgets.PasswordInput(attrs={
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "Senha",
'autocomplete': "off",
}),
error_messages={
'min_length': "O tamanho da senha é muito curta.",
}
)
# Confirmar Senha
new_password2 = forms.CharField(
label='Reinsira sua senha',
max_length=16,
min_length=8,
required=True,
widget=widgets.PasswordInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Senha",
'autocomplete': "off",
}),
help_text='Digite a sua senha novamente',
error_messages={
'min_length': "O tamanho da senha é muito curta.",
}
)
initial = {
'new_password1': '',
'new_password2': '',
}
@parsleyfy
class CheckResetForm(PasswordResetForm):
"""
Classe do formulário para verificar redifinição de senha por email
"""
# Classe de Metadados
class Meta:
fields = ['email']
parsley_extras = {
'email': {
'remote': reverse_lazy('parsley-verificar-email'),
'remote-message': "O Email digitado não está cadastrado no sistema."
},
}
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
super(CheckResetForm, self).__init__(*args, **kwargs)
# Adicionar a classe CSS 'form-control' para todos os campos do formulário
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
form_name = 'reset-form'
scope_prefix = 'dados_reset'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
# Email
email = forms.EmailField(
label='Email',
max_length=50,
min_length=3,
required=True,
widget=widgets.EmailInput(attrs={
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "email@email.com",
}),
error_messages={
'min_length': "Email inválido.",
'invalid': "Email inválido.",
}
)
initial = {
'email': '',
}
@parsleyfy
class LoginForm(AuthenticationForm):
"""
Classe do formulário de autenticação de Login
"""
# Classe de Metadados
class Meta:
fields = ['username', 'password']
parsley_extras = {
'username': {
'remote': reverse_lazy('parsley-verificar-email'),
'remote-message': "O Email digitado não está cadastrado no sistema."
},
}
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
super(LoginForm, self).__init__(*args, **kwargs)
# Adicionar a classe CSS 'form-control' para todos os campos do formulário
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
def confirm_login_allowed(self, user):
"""
Metódo para confirmar a autorização de Login
"""
# Verifica se o usuário está ativo
if not user.is_active:
raise forms.ValidationError(('Sua conta não está ativa. ' \
'Verifique as mensagens em seu email.'),
code='inactive',)
form_name = 'login-form'
scope_prefix = 'dados_login'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
# Email
username = forms.EmailField(
label='Email',
max_length=50,
min_length=3,
required=True,
widget=widgets.EmailInput(attrs={
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "email@email.com",
}),
error_messages={
'min_length': "Email inválido.",
'invalid': "Email inválido.",
}
)
password = forms.CharField(
label='Senha',
max_length=16,
required=True,
widget=widgets.PasswordInput(attrs={
'placeholder': "Senha",
})
)
initial = {
'username': '',
'password': '',
}
@parsleyfy
class VenderCriptomoedaForm(Form):
"""
Classe do formulário de venda de criptomoeda
"""
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
lista = kwargs.pop('lista')
super(VenderCriptomoedaForm, self).__init__(*args, **kwargs)
# Adiciona a lista de criptomoedas atualizada
self.fields['criptomoeda'].choices = lista
# Adicionar as classes CSS
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
form_name = 'criptomoeda_form'
scope_prefix = 'dados_criptomoeda'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
criptomoeda = forms.ChoiceField(
label='Criptomoeda',
required=True,
widget=widgets.Select(attrs={
'data-parsley-trigger':"focusin focusout",
}),
help_text='Escolha a criptomoeda desejada.'
)
parar_compras = forms.BooleanField(
label='Parar compras de criptomoedas',
required=False,
widget=widgets.CheckboxInput(),
initial=False,
help_text='Parar todas as compras de criptomoedas porém, as ordens ativas permanecem \
até serem vendidas.'
)
initial = {
'criptomoeda': '',
'parar_compras': False,
}
@parsleyfy
class ComprarCriptomoedaForm(Form):
"""
Classe do formulário de compra de criptomoeda
"""
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
lista = kwargs.pop('lista')
super(ComprarCriptomoedaForm, self).__init__(*args, **kwargs)
# Adiciona a lista de criptomoedas atualizada
self.fields['criptomoeda'].choices = lista
# Adicionar as classes CSS
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
form_name = 'criptomoeda_form'
scope_prefix = 'dados_criptomoeda'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
criptomoeda = forms.ChoiceField(
label='Criptomoeda',
required=True,
widget=widgets.Select(attrs={
'data-parsley-trigger':"focusin focusout",
}),
help_text='Escolha a criptomoeda desejada.'
)
initial = {
'criptomoeda': '',
}
@parsleyfy
class CarteiraForm(ModelForm):
"""
Classe do formulário de cadastro do Usuário
"""
# Classe de Metadados
class Meta:
model = CarteiraCriptomoeda
# Pegar os campos do model
fields = ['simulacao', 'saldo', 'chave_api', 'chave_secreta', 'num_operacoes']
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
super(CarteiraForm, self).__init__(*args, **kwargs)
# Adicionar a classe CSS 'form-control' para todos os campos do formulário
# exceto o campo 'usuario'
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
form_name = 'carteira_form'
scope_prefix = 'dados_carteira'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
simulacao = forms.BooleanField(
label='Modo de simulação',
required=False,
widget=widgets.CheckboxInput(),
initial=True,
help_text='Com o modo de simulação ativado não será utilizado o saldo da carteira \
se for verdadeira.'
)
saldo = forms.FloatField(
label='Saldo a ser utilizado',
required=True,
widget=widgets.NumberInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Saldo",
'min': '0.006',
'step':'0.001',
'autocomplete': "off",
}),
help_text='Digite o saldo em Bitcoins a ser utilizado da carteira. \
Minímo é de 0.006 Bitcoin.'
)
chave_api = forms.CharField(
label='Chave da API',
max_length=128,
required=False,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Chave da API",
'autocomplete': "off",
}),
help_text='Digite a chave da API'
)
chave_secreta = forms.CharField(
label='Chave Secreta',
max_length=128,
required=False,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Chave Secreta",
'autocomplete': "off",
}),
help_text='Digite a chave secreta da API'
)
num_operacoes = forms.IntegerField(
label='Operações',
required=True,
widget=widgets.NumberInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Número de operações",
'max': '3',
'min': '1',
'step':'1',
'autocomplete': "off",
}),
help_text='Digite o número máximo de operações que o Robô irá efetuar simultaneamente, \
utilizando o saldo. Máximo 3 operações e minímo 1 operação.'
)
initial = {
'simulacao': True,
'saldo': '',
'chave_api': '',
'chave_secreta': '',
'num_operacoes': '',
}
@parsleyfy
class AtualizarUsuarioForm(ModelForm):
"""
Classe do formulário de atualização de dados cadastrais do Usuário
"""
# Classe de Metadados
class Meta:
model = Usuario
# Pegar todos campos do model
fields = ['first_name', 'last_name', 'cpf', 'telefone', 'email']
parsley_extras = {
'cpf': {
'remote': reverse_lazy('parsley-validar-cpf'),
'remote-message': "O CPF digitado está sendo utilizado por outro usuário \
ou é inválido."
},
'email': {
'remote': reverse_lazy('parsley-validar-email'),
'remote-message': "O Email digitado está sendo utilizado por outro usuário."
},
}
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
super(AtualizarUsuarioForm, self).__init__(*args, **kwargs)
# Adicionar a classe CSS 'form-control' para todos os campos do formulário
# exceto o campo 'termos_uso'
for field in self.fields:
if field != 'termos_uso':
self.fields[field].widget.attrs['class'] = 'form-control'
form_name = 'cadastro_form'
scope_prefix = 'dados_cadastro'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
# Iniciliza o validador de CPF
validar_cpf = BRCPFValidator()
first_name = forms.CharField(
label='Nome',
max_length=50,
min_length=2,
required=True,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Nome",
}),
help_text='Digite o seu nome',
error_messages={
'min_length': "O tamanho do nome é muito curto.",
}
)
last_name = forms.CharField(
label='Sobrenome',
max_length=50,
min_length=2,
required=True,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Sobrenome",
}),
help_text='Digite o seu sobrenome',
error_messages={
'min_length': "O tamanho do sobrenome é muito curto.",
}
)
cpf = forms.CharField(
label='CPF',
max_length=14,
min_length=14,
required=False,
disabled=True,
validators=[validar_cpf],
widget=widgets.TextInput(attrs={
'data-mask':"000.000.000-00",
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "000.000.000-00",
}),
help_text='CPF cadastrado',
error_messages={
'min_length': "O tamanho do CPF é muito curto.",
'invalid': "O CPF digitado é inválido.",
}
)
telefone = forms.CharField(
label='Telefone',
max_length=14,
min_length=13,
required=True,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "(00)00000-0000",
}),
help_text='Digite o seu número de telefone',
error_messages={
'min_length': "O tamanho do telefone é muito curto.",
}
)
email = forms.EmailField(
label='Email',
max_length=50,
min_length=3,
required=True,
widget=widgets.EmailInput(attrs={
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "email@email.com",
}),
help_text='Digite o seu email',
error_messages={
'min_length': "O tamanho do email é muito curto.",
'invalid': "Digite um endereço de email válido.",
}
)
@parsleyfy
class UsuarioForm(ModelForm):
"""
Classe do formulário de cadastro do Usuário
"""
# Classe de Metadados
class Meta:
model = Usuario
# Pegar todos campos do model
fields = ['first_name', 'last_name', 'cpf', 'telefone', 'email', 'password']
parsley_extras = {
'cpf': {
'remote': reverse_lazy('parsley-validar-cpf'),
'remote-message': "O CPF digitado está sendo utilizado por outro usuário \
ou é inválido."
},
'email': {
'remote': reverse_lazy('parsley-validar-email'),
'remote-message': "O Email digitado está sendo utilizado por outro usuário."
},
'password': {
'remote': reverse_lazy('parsley-validar-senha'),
'remote-message': "Digite uma senha com números e caracteres"
},
'confirmar_senha': {
'equalto': "password",
'error-message': "As senhas digitadas não são iguais",
},
}
def __init__(self, *args, **kwargs):
"""
Metódo de inicialização do Form
"""
super(UsuarioForm, self).__init__(*args, **kwargs)
# Adicionar a classe CSS 'form-control' para todos os campos do formulário
# exceto o campo 'termos_uso'
for field in self.fields:
if field != 'termos_uso':
self.fields[field].widget.attrs['class'] = 'form-control'
def save(self, commit=True):
"""
Metódo para salvar o Form
"""
usuario = super(UsuarioForm, self).save(commit=False)
# Criptografar senha do usuário antes de realizar o Commit
usuario.set_password(self.cleaned_data["password"])
if commit:
usuario.save()
return usuario
form_name = 'cadastro_form'
scope_prefix = 'dados_cadastro'
# Adicionar a classe CSS 'djng-field-required' para campos obrigatórios
required_css_class = 'field-required'
# Iniciliza o validador de CPF
validar_cpf = BRCPFValidator()
first_name = forms.CharField(
label='Nome',
max_length=50,
min_length=2,
required=True,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Nome",
}),
help_text='Digite o seu nome',
error_messages={
'min_length': "O tamanho do nome é muito curto.",
}
)
last_name = forms.CharField(
label='Sobrenome',
max_length=50,
min_length=2,
required=True,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Sobrenome",
}),
help_text='Digite o seu sobrenome',
error_messages={
'min_length': "O tamanho do sobrenome é muito curto.",
}
)
cpf = forms.CharField(
label='CPF',
max_length=14,
min_length=14,
required=True,
validators=[validar_cpf],
widget=widgets.TextInput(attrs={
'data-mask':"000.000.000-00",
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "000.000.000-00",
}),
help_text='Digite o seu CPF',
error_messages={
'min_length': "O tamanho do CPF é muito curto.",
'invalid': "O CPF digitado é inválido.",
}
)
telefone = forms.CharField(
label='Telefone',
max_length=14,
min_length=13,
required=True,
widget=widgets.TextInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "(00)00000-0000",
}),
help_text='Digite o seu número de telefone',
error_messages={
'min_length': "O tamanho do telefone é muito curto.",
}
)
email = forms.EmailField(
label='Email',
max_length=50,
min_length=3,
required=True,
widget=widgets.EmailInput(attrs={
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "email@email.com",
}),
help_text='Digite o seu email',
error_messages={
'min_length': "O tamanho do email é muito curto.",
'invalid': "Digite um endereço de email válido.",
}
)
password = forms.CharField(
label='Senha',
max_length=16,
min_length=8,
required=True,
widget=widgets.PasswordInput(attrs={
'data-parsley-trigger':"focusin focusout",
'data-parsley-remote-options':'{ "type": "POST", "dataType": "json" }',
'placeholder': "Senha",
'autocomplete': "off",
}),
help_text='Digite a sua senha',
error_messages={
'min_length': "O tamanho da senha é muito curta.",
}
)
confirmar_senha = forms.CharField(
label='Reinsira sua senha',
max_length=16,
min_length=8,
required=True,
widget=widgets.PasswordInput(attrs={
'data-parsley-trigger':"focusin focusout",
'placeholder': "Senha",
'autocomplete': "off",
}),
help_text='Digite a sua senha novamente',
error_messages={
'min_length': "O tamanho da senha é muito curta.",
}
)
termos_uso = forms.BooleanField(
label='Aceito os Termos de Uso',
required=True,
widget=widgets.CheckboxInput(attrs={
'data-parsley-trigger':"focusin focusout",
}),
error_messages={
'required': "É obrigatório aceitar os Termos de Uso.",
}
)
initial = {
'first_name': '',
'last_name': '',
'cpf': '',
'email': '',
'telefone': '',
'password': '',
'confirmar_senha': '',
'termos_uso': False,
}
|
from django.shortcuts import render, redirect, reverse
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
# from google.cloud import vision
# from google.cloud.vision import types
import io
import os
from .models import Contact, Address, Image
from .forms import ImageForm, NewContact
from biz.settings import BASE_DIR
#instantiates a client, specifying the project credentials (json file)
# path_to_json = os.path.join(BASE_DIR, 'biz\\biz-contacts-service-account.json')
# print(path_to_json)
# vision_client = vision.Client.from_service_account_json(path_to_json, "biz-contacts")
# Create your views here.
def index(request):
session_id = request.session
if request.user.is_authenticated:
return redirect('dashboard/')
else:
return render(request, 'bizcontacts/index.html')
#@login_required()
def dashboard(request):
user = request.user
contacts = Contact.objects.filter(user=user)
for contact in contacts:
contact.cell_displayable = contact.make_displayable_phone_number(contact.cell_number)
contact.work_displayable = contact.make_displayable_phone_number(contact.work_number)
context = {'contact_list': contacts}
return render(request,'bizcontacts/dashboard.html', context)
@login_required()
def dashboard_search(request):
user = request.user
search_term = request.GET['q']
contact = Contact.objects.filter(user=user).filter(name__icontains=search_term)
print(contact)
context = {'contact_list': contact.values()}
html = render_to_string('bizcontacts/cards.html',
context,
request=request,
)
return JsonResponse({'html': html})
def signup(request):
if request.method == 'POST':
email = request.POST['signup_email']
password = request.POST['password']
confirm_password = request.POST['confirm_password']
if password == confirm_password:
# we are good to make a user
user = User.objects.create_user(email, email, password)
user.save()
user = authenticate(request, username=email, password=password)
if (user is not None):
login(request, user)
return redirect('dashboard')
def login_view(request):
username = request.POST['email']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
print(user)
print(request)
if (user is not None):
login(request, user)
return redirect(reverse('dashboard'))
else:
return redirect(reverse('index'))
def logout_view(request):
logout(request)
return redirect(reverse('index'))
def create_contact(request):
form = NewContact()
if request.method == 'GET':
context = {'form': form}
html_form = render_to_string('bizcontacts/partial_contact_create.html',
context,
request=request,
)
return JsonResponse({'html_form': html_form})
else:
# add some checks to make sure that the contact info is in the correct form
# if it isn't we need to do something else
name = request.POST['name']
email = request.POST['email']
business_name = request.POST['business_name']
website = request.POST['website']
cell_number = request.POST['cell_number']
work_number = request.POST['work_number']
notes = request.POST['notes']
user = request.user
contact = Contact(name=name, email=email, business_name=business_name, user=user, cell_number=cell_number, work_number=work_number, notes=notes, website=website)
contact.save()
return redirect(reverse('dashboard'))
def image_upload(request):
print(request.FILES)
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
form.save()
print(request.FILES['front_url'])
parse_image(request.FILES['front_url'])
return redirect(reverse('dashboard'))
else:
form = ImageForm()
return render(request, 'bizcontacts/image_upload_form.html', {
'form': form
})
def parse_image(image_file):
client = vision.ImageAnnotatorClient()
file_name = os.path.join(os.path.dirname(__file__), image_file)
with io.open(file_name, 'rb') as vision_file:
content = vision_file.read()
image = types.Image(content=content)
response = client.label_detection(image=image)
labels = response.label_annotations
print('Labels')
for label in labels:
print(label.description)
|
import math
import numpy as N
import matplotlib.pyplot as P
from scipy.optimize import curve_fit
import sys
import os
def analytical_C( angle, p1, p2 ):
angle = N.radians( angle )
return p1*3.0/2.0*( (1-N.cos( angle ))/2.0*N.log( (1-N.cos( angle ))/2.0 )-(1-N.cos( angle ))/12.0+1/3 )+0.5+p2
L = []
D = {}
for i in range( 5, 185, 5 ):
L.append( i )
D[ i ] = []
inFile = open( os.path.join( sys.argv[2], sys.argv[3] ), "r" )
lines = inFile.readlines()
inFile.close()
for i in range( len( lines ) ):
X = lines[i].split(' ')
ANG_SEP = X[0][: len( X[0] )-1 ]
C = X[1][: len( X[1] )-1 ]
if float(C) != 0:
for j in range( len( L ) ):
if float( ANG_SEP ) < L[j]:
D[ L[j] ].append( ( float(ANG_SEP), float(C) ) )
break
xdata = []
ydata = []
for i in range( len( L ) ):
if len( D[ L[i] ] ) > 0:
CORR = 0
ANG = 0
for j in range( len( D[ L[i] ] ) ):
CORR += D[ L[i] ][j][1]
ANG += D[ L[i] ][j][0]
CORR /= len( D[ L[i] ] )
ANG /= len( D[ L[i] ] )
sd_CORR = 0
for j in range( len( D[ L[i] ] ) ):
sd_CORR += ( D[ L[i] ][j][1]-CORR )**2
sd_CORR /= len( D[ L[i] ] )
sd_CORR = math.sqrt( sd_CORR )
sd_ANG = 0
for j in range( len( D[ L[i] ] ) ):
sd_ANG += ( D[ L[i] ][j][0]-ANG )**2
sd_ANG /= len( D[ L[i] ] )
sd_ANG = math.sqrt( sd_ANG )
P.scatter( ANG, CORR, s = 15 )
P.errorbar( ANG, CORR, xerr = sd_ANG, yerr = sd_CORR, fmt = '', color = 'b' )
xdata.append( ANG )
ydata.append( CORR )
xdata = N.array( xdata )
ydata = N.array( ydata )
A, B = curve_fit( analytical_C, xdata, ydata )
P.plot( N.linspace( 0.001, 180, 1000, endpoint = True ), analytical_C( N.linspace( 0.001, 180, 1000, endpoint = True ), A[0], A[1] ) )
save_path = sys.argv[2]
P.title( "HD curve for "+sys.argv[3] )
P.xlabel("Angular separation between two pulsars (degrees)", fontsize=14, color="black")
P.ylabel("Correlation between two pulsars", fontsize=14, color="black")
P.savefig( os.path.join( save_path, sys.argv[3][:len( sys.argv[3] )-4] )+".png" )
|
"""
.. moduleauthor:: Stephen Raymond Ferg and Robert Lugg (active)
.. default-domain:: py
.. highlight:: python
Version |release|
"""
# Starting and global variables
rootWindowPosition = "+300+200"
PROPORTIONAL_FONT_FAMILY = ("MS", "Sans", "Serif")
MONOSPACE_FONT_FAMILY = "Courier"
PROPORTIONAL_FONT_SIZE = 10
# a little smaller, because it is more legible at a smaller size
MONOSPACE_FONT_SIZE = 9
TEXT_ENTRY_FONT_SIZE = 12 # a little larger makes it easier to see
STANDARD_SELECTION_EVENTS = ["Return", "Button-1", "space"]
|
from collections import defaultdict
INF=float("inf")
class Graph():
def __init__(self, vertices):
self.vertices = vertices
self.graph = []
def addWeights(self,u,v,w):
self.graph.append([u,v,w])
distances = {}
parents = {}
def bellmanford(graph, start):
    # `graph.vertices` is assumed to be an iterable of vertex labels
    initialize(graph, start)
    # Relax every edge |V| - 1 times
    for _ in range(len(graph.vertices) - 1):
        for u, v, w in graph.graph:
            relax(u, v, w)
    # One extra pass: any further improvement means a negative-weight cycle
    for u, v, w in graph.graph:
        if distances[u] != INF and distances[u] + w < distances[v]:
            print("This graph has a negative cycle!")
            return None
    return distances, parents
def relax(u, v, w):
    if distances[u] != INF and distances[u] + w < distances[v]:
        distances[v] = distances[u] + w
        parents[v] = u
def initialize(graph, start):
    for vertex in graph.vertices:
        distances[vertex] = INF
        parents[vertex] = None
    distances[start] = 0
|
# -*- coding: utf-8 -*-
"""
cues.cue
========
This module contains the class for creating and instantiating `Cue` objects.
"""
import subprocess
from abc import abstractmethod
from collections import deque
from typing import Deque
from . import utils
from .canvas import Canvas
class Cue(Canvas):
"""The abstract base class for all child Cue objects.
Note
----
This class contains abstractmethods which means you should not instantiate
it.
Attributes
----------
_name : str
The name of the Cue instance.
_message : str
Instructions or useful information regarding the prompt for the user.
keys : dict
Blend of different keypresses.
listen_for_key : FunctionType
Function that listens for keypresses based on OS.
_answer : dict
The answer to return once the user successfully responds to a Cue object.
"""
__name__ = 'Cue'
__module__ = 'cues'
def __init__(self, name: str, message: str):
"""
Parameters
----------
name
The name of the Cue instance.
message
Instructions or useful information regarding the prompt for the user.
"""
super().__init__()
# Enables color in the console for Windows machines:
if utils.is_windows():
subprocess.call('color', shell=True)
if isinstance(name, str):
self._name = name
else:
raise TypeError(f"'{type(name)}' object is not a str object")
if isinstance(message, str):
self._message = message
else:
raise TypeError(f"'{type(message)}' object is not a str object")
# Gathers all possible key presses into a dict:
self.keys = utils.get_keys()
# Chooses which key listening function to use based on OS:
self.listen_for_key = utils.get_listen_function()
self._answer = None
@property
def answer(self):
return self._answer
@answer.setter
def answer(self, answer: dict):
self._answer = answer
@abstractmethod
def send(self):
pass
@abstractmethod
def _draw(self):
pass
@staticmethod
def create_deque(lis: list, length: int = None) -> Deque[str]:
"""Returns a deque object containing strings.
Parameters
----------
lis
A list of objects that can be converted to str objects.
length : int, optional
A value that can be used to set the maxlen parameter of the deque()
function.
Returns
-------
Deque of str
A list converted into a deque with a set maxlen.
"""
container = []
for elem in lis:
container.append(str(elem))
d = deque(container, maxlen=(length or len(container)))
return d
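# A minimal usage sketch of the static helper above (the values are illustrative):
#
#     Cue.create_deque([1, 2, 3], length=2)   # -> deque(['2', '3'], maxlen=2)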
|
"""
Tests for the properties module
"""
from bluebird.utils.properties import AircraftProperties
from bluebird.utils.types import LatLon
from tests.data import TEST_SCENARIO
def test_aircraft_properties_from_data():
aircraft_data = TEST_SCENARIO["aircraft"][0]
assert AircraftProperties.from_data(aircraft_data) == AircraftProperties(
aircraft_type=aircraft_data["type"],
altitude=aircraft_data["currentFlightLevel"],
callsign=aircraft_data["callsign"],
cleared_flight_level=aircraft_data["clearedFlightLevel"],
ground_speed=None,
heading=None,
initial_flight_level=aircraft_data["currentFlightLevel"],
position=LatLon(
aircraft_data["startPosition"][0], aircraft_data["startPosition"][1]
),
requested_flight_level=aircraft_data["requestedFlightLevel"],
route_name=None,
vertical_speed=None,
)
|
from pygame_pixelarray import *
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import MITgcmutils as mit
plt.ion()
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
dir0 = 'tmp_energy7/'
filets = 'diag_ocnSnap*'
filepe = 'tracer_wb*'
fileave = 'diag_ocnTave*'
flag_grid = 0
alphat = 2e-4
betas = 7.4e-4
#%==================== LOAD FIELDS ===================================
RC = mit.rdmds(dir0+'RC*')
RA = mit.rdmds(dir0+'RA*')
DRF = mit.rdmds(dir0+'DRF*')
hFacC = mit.rdmds(dir0+'hFacC*')
si_z,si_y,si_x = hFacC.shape
hFacC2 = np.where(hFacC < 1, np.NaN, 1)
RA = RA[None,:,:]
i = 1
iterst = mit.mds.scanforfiles(dir0 + filets)
itersp = mit.mds.scanforfiles(dir0 + filepe)
# t0 = mit.rdmds(dir0 + filets,iterst[i],rec=0)
# t1 = mit.rdmds(dir0 + filets,iterst[i+1],rec=0)
# s0 = mit.rdmds(dir0 + filets,iterst[i],rec=1)
# s1 = mit.rdmds(dir0 + filets,iterst[i+1],rec=1)
#w0 = mit.rdmds(dir0 + filew,iterst[i],rec=0)
#w1 = mit.rdmds(dir0 + filew,iterst[i+1],rec=0)
wav = mit.rdmds(dir0 + fileave,itersp[i],rec=4)
dtdt = mit.rdmds(dir0 + filepe,itersp[i],rec=0)
dsdt = mit.rdmds(dir0 + filepe,itersp[i],rec=1)
advrt = mit.rdmds(dir0 + filepe,itersp[i],rec=2)
advxt = mit.rdmds(dir0 + filepe,itersp[i],rec=3)
advyt = mit.rdmds(dir0 + filepe,itersp[i],rec=4)
advrs = mit.rdmds(dir0 + filepe,itersp[i],rec=5)
advxs = mit.rdmds(dir0 + filepe,itersp[i],rec=6)
advys = mit.rdmds(dir0 + filepe,itersp[i],rec=7)
wb2 = mit.rdmds(dir0 + filepe,itersp[i],rec=8)
wb = mit.rdmds(dir0 + filepe,itersp[i],rec=9)
dtdt = dtdt/86400
dsdt = dsdt/86400
# t0 = np.where(t0 == 0,np.NaN,t0)
# t1 = np.where(t1 == 0,np.NaN,t1)
ix = si_x // 2
advrt = np.append(advrt,advrt[None,0,:,:],axis=0)
advrs = np.append(advrs,advrs[None,0,:,:],axis=0)
advyt = np.append(advyt,advyt[:,None,0,:],axis=1)
advys = np.append(advys,advys[:,None,0,:],axis=1)
advxt = np.append(advxt,advxt[:,:,None,0],axis=2)
advxs = np.append(advxs,advxs[:,:,None,0],axis=2)
adv_at = -( advrt[:-1,:,:] - advrt[1:,:,:] \
+ advyt[:,1:,:] - advyt[:,:-1,:] \
+ advxt[:,:,1:] - advxt[:,:,:-1]) \
/(RA*DRF)
adv_as = -( advrs[:-1,:,:] - advrs[1:,:,:] \
+ advys[:,1:,:] - advys[:,:-1,:] \
+ advxs[:,:,1:] - advxs[:,:,:-1]) \
/(RA*DRF)
def comp_b (temp,salt):
return alphat*temp + betas*salt
# b0 = comp_b(t0,s0)
# b1 = comp_b(t1,s1)
# pe0 = RC*b0
# pe1 = RC*b1
dbdt = comp_b (dtdt,dsdt)
advb = comp_b(adv_at, adv_as)
dpedt = RC*(dbdt-advb)
def yzplot(psi,*args, **kwargs):
psi = np.where(np.isnan(psi),0.,psi)
vmax = np.max(np.abs((psi)))
vmax = kwargs.get('vmax', vmax)
vmin = -vmax
psi = np.where(psi<vmin,vmin,psi)
psi = np.where(psi>vmax,vmax,psi)
title = kwargs.get('title',None)
fgrid = kwargs.get('fgrid', 0)
if fgrid:
xx = YC[:,ix]*1e-3
yy = RC[:,0,0]
else:
si_y,si_x = psi.shape
xx = np.arange(si_x)
yy = np.arange(si_y)
plt.figure()
plt.contourf(xx,yy,psi,100,cmap=plt.cm.seismic,vmin=vmin,vmax=vmax,extend='both')
plt.colorbar(format='%.0e')
plt.title(title)
if fgrid:
plt.xlabel('x (km)')
plt.ylabel('z (m)')
# psi = (pe1-pe0)/4500
# psi = psi[:,:,ix]
# yzplot(psi,title=r"tottend (m\,s$^{-2}$)",fgrid=flag_grid,vmax=1e-5)
# psi2 = dpedt[:,:,ix]
# yzplot(psi2,title=r"tottend (m\,s$^{-2}$)",fgrid=flag_grid,vmax=1e-5)
# psi3 = (psi - psi2)/psi2
# yzplot(psi3,title=r"tottend (m\,s$^{-2}$)",fgrid=flag_grid,vmax = 1e-3)
psi4 = adv_at[:,:,ix]
yzplot(psi4,vmax = 1e-4)
psi5 = dtdt[:,:,ix]
yzplot(psi5,vmax = 1e-4)
psi6 = psi5 - psi4
yzplot(psi6,vmax = 1e-4)
|
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import json
from typing import Any, Dict, Optional
from google.cloud import firestore
from google.oauth2 import service_account
class FirestoreWriter:
def __init__(self, project_id: str, credentials_json: Optional[str] = None):
connection = {}
connection["project"] = project_id
if credentials_json:
try:
json_account_info = json.loads(credentials_json)
except ValueError:
raise ValueError("The 'credentials_json' field must contain a valid JSON document with service account access data.")
credentials = service_account.Credentials.from_service_account_info(json_account_info)
connection["credentials"] = credentials
self.client = firestore.Client(**connection)
def check(self) -> bool:
return bool(list(self.client.collections()))
def write(self, stream: str, data: Dict[str, Any]) -> None:
self.client.collection(stream).add(data)
def purge(self, stream: str) -> None:
for doc in self.client.collection(stream).stream():
doc.reference.delete()
|
# Create a function that finds the area and the perimeter of a rectangle.
# The function takes two arguments
# (a, b, where a is the length and b is the width). Print the result
# on the screen ( P = ... , S = ... )
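# A minimal sketch of the function the task asks for (the script below computes
# the same values inline; the name `rectangle` here is only illustrative):
def rectangle(a, b):
    # Return the perimeter and the area of an a-by-b rectangle, e.g. (3, 4) -> (14, 12)
    return 2 * (a + b), a * b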
print("Введіть довжину прямокутника")
a = float(input())
print("Введіть ширину прямокутника")
b = float(input())
P = 2*(a+b)
S = a*b
print ("Для прямокутника зі сторонами a =",a," і b =",b, " периметр становить: P = ",P, " і площа становить: S = ",S) |
#!/usr/bin/env python
# coding: utf-8
# |------------------------------------------------------------------
# | # Geospatial Data Exercise
# |------------------------------------------------------------------
# |
# | This is an exercise notebook for the third lesson of the kaggle course
# | ["Geospatial Analysis"](https://www.kaggle.com/learn/geospatial-analysis)
# | offered by Alexis Cook and Jessica Li. The main goal of the lesson is
# | to get used to __Interactive Maps__. We will learn how to use `folium`
# | with the following functions.
# |
# | * Map
# | * Circle (= bubble map)
# | * HeatMaps
# | * Choropleth
# |
# | Here 'interactive' means
# |
# | - zoom in / zoom out
# | - move (drag the map)
# | - tooltip (information shows up when the pointer is on a marker)
# | - popup (information shows up when a marker is clicked)
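# | As a minimal illustration (the coordinates and values here are made up),
# | a bubble-map marker with a tooltip and a popup looks like:
# |
# |     m = folium.Map(location=[35.68, 139.77], zoom_start=10)
# |     folium.Circle([35.68, 139.77], radius=2000,
# |                   tooltip='Tokyo', popup='pop. ~14M').add_to(m)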
# | ## 1. Task
# |
# | Visualize how the largest cities in Japan are vulnerable to the
# | threat of big earthquakes in the future.
# | ## 2. Data
# |
# | 1. Known plate-boundaries.
# | 2. Historical earthquakes in 1970-2014.
# | 3. Borders (Shapely Polygon) of Japanese prefectures.
# | 4. Populations and areas in Japanese prefectures.
# | ## 3. Notebook
# |
# | Import packages.
from kaggle_geospatial.kgsp import *
from folium.plugins import HeatMap
from folium import Choropleth, Circle
import folium
import pandas as pd
import geopandas as gpd
import numpy as np
from pathlib import Path
import os
import webbrowser
import zipfile
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# -------------------------------------------------------
# | Set up some directories.
CWD = '/Users/meg/git6/earthquake/'
DATA_DIR = '../input/geospatial-learn-course-data/'
KAGGLE_DIR = 'alexisbcook/geospatial-learn-course-data'
GEO_DIR = 'geospatial-learn-course-data'
set_cwd(CWD)
set_data_dir(DATA_DIR, KAGGLE_DIR, GEO_DIR, CWD)
show_whole_dataframe(True)
# ---------------------------------------
# | Read plate data. The coordinates in the 'geometry' of
# | `plate_boundaries` are (longitude, latitude), whereas `folium`
# | expects (latitude, longitude). Swap them, and store them
# | in a new column `coordinates`.
plate_boundaries_dir = DATA_DIR + "Plate_Boundaries/Plate_Boundaries/"
plate_boundaries = gpd.read_file(
plate_boundaries_dir + "Plate_Boundaries.shp")
plate_boundaries['coordinates'] = plate_boundaries.apply(
lambda x: [(b, a) for (a, b) in list(x.geometry.coords)], axis=1)
print(plate_boundaries.info())
print(plate_boundaries.crs)
plate_boundaries.head(3)
# ---------------------------------------
# | Read the record of the historical earthquakes.
earthquakes = pd.read_csv(DATA_DIR + "earthquakes1970-2014.csv",
parse_dates=["DateTime"])
print(earthquakes.info())
earthquakes.head(3)
# ---------------------------------------
# | Read the prefectural boundaries.
prefectures_dir = DATA_DIR + "japan-prefecture-boundaries/japan-prefecture-boundaries/"
prefectures = gpd.read_file(
prefectures_dir + "japan-prefecture-boundaries.shp")
prefectures.set_index('prefecture', inplace=True)
print(prefectures.info())
prefectures.head(3)
# ---------------------------------------
# | Read the population and the areas of each prefecture.
population = pd.read_csv(DATA_DIR + "japan-prefecture-population.csv")
population.set_index('prefecture', inplace=True)
# ========================================
# | Visualize the plate boundaries near Japan.
# | Overlay a heatmap of the historical earthquakes.
x = np.array([p.centroid.x for p in prefectures['geometry']]).mean()
y = np.array([p.centroid.y for p in prefectures['geometry']]).mean()
center = [y, x]
zoom = 7
# tiles = 'openstreetmap'
tiles = 'cartodbpositron'
# -------------------------------------------------------
m_1 = folium.Map(location=center, tiles=tiles, zoom_start=zoom)
dump = [folium.PolyLine(
locations=p, weight=12, color='mediumvioletred').add_to(m_1)
for p in plate_boundaries['coordinates']]
HeatMap(data=earthquakes[['Latitude', 'Longitude']],
radius=30).add_to(m_1)
# -------------------------------------------------------
# | Show it on the notebook and the browser window.
embed_map(m_1, './html/m_1.html')
# --
show_on_browser(m_1, CWD + './html/m_1b.html')
# -------------------------------------------------------
# | Earthquakes often happen about 100-300 km west
# | of the plate boundaries. The northern half of Japan,
# | with Tokyo on its southern-most edge,
# | is particularly vulnerable to future earthquakes.
#
# =======================================================
# | Calculate the area (in square kilometers) of each prefecture.
area_sqkm = pd.Series(prefectures['geometry'].to_crs(
epsg=32654).area / 10**6, name='area_sqkm')
# -------------------------------------------------------
# | Join the population table with the areas, then add the population
# | density (per square kilometer) for each prefecture.
# | What are the most densely populated prefectures?
# | What are the most populous prefectures?
stats = population.join(area_sqkm)
stats['density'] = stats['population'] / stats['area_sqkm']
stats['log10_density'] = np.log10(stats['density'])
stats.sort_values('population').tail(5)
# -------------------------------------------------------
# | Use `plotly` to see the populations and the population densities
# | of the Japanese prefectures.
n_rows = 1
n_cols = 2
fig = make_subplots(rows=n_rows, cols=n_cols,
vertical_spacing=0.05,
horizontal_spacing=0.05,
subplot_titles=['Populations in Japanese Prefectures',
'Densities in Japanese Prefectures'])
trace1 = go.Bar(y=stats.sort_values('density').index,
x=stats.sort_values('density')['population'],
marker=dict(color='teal'),
orientation='h',
xaxis='x',
yaxis='y')
trace2 = go.Bar(y=stats.sort_values('density').index,
x=stats.sort_values('density')['density'],
marker=dict(color='coral'),
orientation='h',
xaxis='x2',
yaxis='y2')
data = [trace1, trace2]
layout = go.Layout(height=512 * 4, width=1024,
font=dict(size=20),
showlegend=False)
layout = fig.layout.update(layout)
fig = go.Figure(data=data, layout=layout)
# -------------------------------------------------------
# | Show it on the notebook and the browser window.
embed_plot(fig, './html/p_1.html')
# --
fig.show()
# -------------------------------------------------------
# | Show highly populated prefectures in a Choropleth.
m_2 = folium.Map(location=center, tiles=tiles, zoom_start=zoom)
Choropleth(geo_data=prefectures.__geo_interface__,
data=stats['density'],
# data=stats,
columns=['density'],
key_on='feature.id',
color='navy',
fill_color='YlGnBu',
fill_opacity=0.8
).add_to(m_2)
# -------------------------------------------------------
# | Show it on the notebook and the browser window.
embed_map(m_2, './html/m_2.html')
# --
show_on_browser(m_2, CWD + './html/m_2b.html')
# -------------------------------------------------------
# | Show the historical earthquake record in a bubble map.
m_3 = folium.Map(location=center, tiles=tiles, zoom_start=zoom)
dump = [folium.PolyLine(
locations=p, weight=12, color='mediumvioletred').add_to(m_3)
for p in plate_boundaries['coordinates']]
Choropleth(geo_data=prefectures.__geo_interface__,
data=stats['log10_density'],
columns=['log10_density'],
key_on='feature.id',
color='navy',
fill_color='YlGnBu',
fill_opacity=0.8
).add_to(m_3)
dump = [Circle([r['Latitude'], r['Longitude']],
radius=4 ** r['Magnitude'],
color='',
fill_color='coral',
fill_opacity=0.5,
fill=True).add_to(m_3)
for i, r in earthquakes.iterrows()]
# -------------------------------------------------------
# | Show it on the notebook and the browser window.
embed_map(m_3, './html/m_3.html')
# --
show_on_browser(m_3, CWD + './html/m_3b.html')
# -------------------------------------------------------
# | ## 4. Conclusion
# |
# | If we limit ourselves only to the largest earthquakes
# | that could lead to a potential catastrophe, the most vulnerable
# | area in Japan is the northern Kanto area near Tokyo,
# | where the most densely populated region overlaps with the
# | southern edge of the plate boundaries of northern Japan.
# |
# | The prefectures that most need to invest in preparing for future earthquakes are
# |
# | - Ibaraki
# | - Chiba
# | - Miyagi
# | - Gunma
# | - Tokyo
# | - Kanagawa
# |
# | in this order.
# -------------------------------------------------------
# | END
|
# coding=utf-8
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy
from enumfields import ChoiceEnum, NumChoiceEnum, Choice
class Color(ChoiceEnum):
__order__ = 'RED GREEN BLUE'
RED = Choice('r', 'Reddish')
GREEN = 'g'
BLUE = Choice('b', ugettext_lazy('bluë'))
class Taste(ChoiceEnum):
SWEET = 1
SOUR = 2
BITTER = 3
SALTY = 4
UMAMI = 5
class ZeroEnum(ChoiceEnum):
ZERO = 0
ONE = 1
class IntegerEnum(NumChoiceEnum):
A = Choice(0, 'foo')
B = 1
C = 2
class LabeledEnum(ChoiceEnum):
FOO = Choice('foo', 'Foo')
BAR = Choice('bar', 'Bar')
FOOBAR = Choice('foobar', 'Foo') # this is intentional. see test_nonunique_label
class SubIntegerEnum(NumChoiceEnum):
C = Choice(0, 'C', parents=(IntegerEnum.A, IntegerEnum.B))
D = Choice(1, 'D', parents=(IntegerEnum.B,))
|
from octo.plugin import OctoPlugin
class PluginTwo(OctoPlugin):
pass
|
from .dxf15parser import DXF15Parser
from .dxf15record import DXF15Record
from .dxf15node import DXF15Node
|
"""
A wrapper around a 32-bit LabVIEW library, :ref:`labview_lib32 <labview-lib>`.
.. attention::
This example requires that the appropriate
`LabVIEW Run-Time Engine <https://www.ni.com/download/labview-run-time-engine-2015/5507/en/>`_ is installed
and that the operating system is Windows.
Example of a server that loads a 32-bit shared library, :ref:`labview_lib <labview-lib>`,
in a 32-bit Python interpreter to host the library. The corresponding :mod:`~.labview64` module
can be executed by a 64-bit Python interpreter and the :class:`~.labview64.Labview64` class can send
a request to the :class:`~.labview32.Labview32` class which calls the 32-bit library to execute the
request and then return the response from the library.
"""
import os
from ctypes import c_double, byref
from msl.loadlib import Server32
class Labview32(Server32):
def __init__(self, host, port, **kwargs):
"""A wrapper around the 32-bit LabVIEW library, :ref:`labview_lib32 <labview-lib>`.
Parameters
----------
host : :class:`str`
The IP address of the server.
port : :class:`int`
The port to open on the server.
Note
----
Any class that is a subclass of :class:`~msl.loadlib.server32.Server32` **MUST**
provide two arguments in its constructor: `host` and `port` (in that order)
and `**kwargs`. Otherwise the ``server32`` executable, see
:class:`~msl.loadlib.start_server32`, cannot create an instance of the
:class:`~msl.loadlib.server32.Server32` subclass.
"""
super(Labview32, self).__init__(os.path.join(os.path.dirname(__file__), 'labview_lib32.dll'),
'cdll', host, port)
def stdev(self, x, weighting=0):
"""Calculates the mean, variance and standard deviation of the values in the input `x`.
See the corresponding 64-bit :meth:`~.labview64.Labview64.stdev` method.
Parameters
----------
x : :class:`list` of :class:`float`
The data to calculate the mean, variance and standard deviation of.
weighting : :class:`int`, optional
Whether to calculate the **sample**, ``weighting = 0``, or the **population**,
``weighting = 1``, standard deviation and variance.
Returns
-------
:class:`float`
The mean.
:class:`float`
The variance.
:class:`float`
The standard deviation.
"""
data = (c_double * len(x))(*x)
mean, variance, std = c_double(), c_double(), c_double()
self.lib.stdev(data, len(x), weighting, byref(mean), byref(variance), byref(std))
return mean.value, variance.value, std.value
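# The module docstring above refers to a corresponding 64-bit client class.
# A rough sketch of what that client could look like is given below as a
# comment; the exact Client64 keyword arguments are assumptions and should be
# checked against the msl-loadlib documentation.
#
#   from msl.loadlib import Client64
#
#   class Labview64(Client64):
#       def __init__(self):
#           # 'labview32' is the name of the module that defines Labview32
#           super(Labview64, self).__init__(module32='labview32')
#
#       def stdev(self, x, weighting=0):
#           # forward the call to the 32-bit server defined above
#           return self.request32('stdev', x, weighting)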
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-11 21:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0004_uploadedfile'),
]
operations = [
migrations.CreateModel(
name='RegulationFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hexhash', models.CharField(default=None, max_length=32, null=True)),
('filename', models.CharField(default=None, max_length=512, null=True)),
('contents', models.BinaryField()),
],
),
migrations.DeleteModel(
name='UploadedFile',
),
]
|
import cv2
import time
from pysimplendi import NDIReceiver
if __name__ == '__main__':
# initialize NDIReceiver
receiver = NDIReceiver()
# get NDI source list
source_list = receiver.getSourceList()
for s in source_list:
print(s)
# exit if no NDI sources
if len(source_list) == 0:
exit(0)
# set source
receiver.setSource(source_list[0])
window_name = 'frame'
counter = 0
while True:
# get current frame
frame = receiver.getCurrentFrame()
if len(frame) >= 3:
counter += 1
cv2.imshow(window_name, frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
time.sleep(0.1)
cv2.destroyWindow(window_name)
|
import requests
from chatrender.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
@staff_member_required
def channels(request, chat_type):
response = requests.get('{}?chat_type={}'.format(
settings.SLACKCHAT_CHANNEL_ENDPOINT,
chat_type,
))
channels = response.json()
return render(
request,
'chatrender/channel_list.html',
context={"channels": channels, "chat_type": chat_type}
)
|
"""
Created: 2001/10/12
__version__ = "$Revision: 1.21 $"
__date__ = "$Date: 2005/03/28 05:46:59 $"
"""
import math
import wx
import time
class AbstractTurtle:
def __init__(self, size):
""" this should be called after a subclass has done its init """
self._size = size
self._tracing = False
self._degrees()
# KEA 2002-03-01
# define abbreviated versions of commands
# these should still work even if the AbstractTurtle or BitmapTurtle
# classes are subclasses
# Get the class for an instance.
selfClass = self.__class__
# Assign several synonyms as Class attributes.
if not hasattr(selfClass, 'fd'): selfClass.fd = selfClass.forward
if not hasattr(selfClass, 'bk'): selfClass.bk = selfClass.backward
if not hasattr(selfClass, 'back'): selfClass.back = selfClass.backward
if not hasattr(selfClass, 'rt'): selfClass.rt = selfClass.right
if not hasattr(selfClass, 'lt'): selfClass.lt = selfClass.left
if not hasattr(selfClass, 'st'): selfClass.st = selfClass.showTurtle
if not hasattr(selfClass, 'ht'): selfClass.ht = selfClass.hideTurtle
if not hasattr(selfClass, 'pu'): selfClass.pu = selfClass.penUp
if not hasattr(selfClass, 'pd'): selfClass.pd = selfClass.penDown
if not hasattr(selfClass, 'clearScreen'): selfClass.clearScreen = selfClass._clear
if not hasattr(selfClass, 'cg'): selfClass.cg = selfClass._clear
if not hasattr(selfClass, 'cs'): selfClass.cs = selfClass._clear
if not hasattr(selfClass, 'cls'): selfClass.cls = selfClass._clear
if not hasattr(selfClass, 'pc'): selfClass.pc = selfClass.color
if not hasattr(selfClass, 'setPenColor'): selfClass.setPenColor = selfClass.color
if not hasattr(selfClass, 'bc'): selfClass.bc = selfClass.setBackColor
if not hasattr(selfClass, 'draw'): selfClass.draw = selfClass.lineTo
# this caused some problems when reset() in the subclass tried
# to call its superclass AbstractTurtle.reset() so I probably wasn't doing
# the method calls correctly
# until I figure it out, I put all the needed code of reset() into
# the subclass
self.reset()
def distance(self, t2):
"""returns the distance between two turtles"""
return math.sqrt(math.pow(self._position[0] - t2._position[0], 2) +
math.pow(self._position[1] - t2._position[1], 2))
def distanceXY(self, x, y):
"""returns the distance between turtle (self) and a point (x, y)"""
return math.sqrt(math.pow(self._position[0] - x, 2) + math.pow(self._position[1] - y, 2))
def _degrees(self, fullcircle=360.0):
self._fullcircle = fullcircle
self._invradian = math.pi / (fullcircle * 0.5)
# illegal syntax, so I used None and the if tests instead
#def plot(self, x=self._position[0], y=self._position[1]):
def plot(self, x=None, y=None):
""" override """
pass
# may have to do multiple versions of methods if case-sensitivity
# is an issue
def dot(self, x, y):
"""Logo plot, should only be 1 pixel wide though"""
self.plot(x, y)
# there are also setX and setY, but I don't remember if they do a plot or a moveTo...
def getXY(self):
"""returns the turtle position"""
return self._position
# this may actually be the Logo plot command rather than
# the equivalent of a moveTo
# need to change these methods so they can handle
# x, y or a tuple (x, y)
def setXY(self, pos):
"""another name for moveTo
this may actually be the Logo plot command rather than the
equivalent of a moveTo"""
self.moveTo(pos[0], pos[1])
def line(self, x1, y1, x2, y2):
""" override """
pass
def getHeading(self):
"""returns the turtle heading in degrees"""
return self._angle
def setHeading(self, angle):
"""set the turtle heading in degrees"""
if self._dirty:
self._drawTurtle()
self._angle = angle % self._fullcircle
if self._visible:
self._drawTurtle()
# my math is shaky, there has to be a shorter
# way to return an angle between 0 - 360
# remember however that 0,0 is the top-left corner
# and positive x, positive y is the bottom-right
# not positive x, negative y as in normal cartesian coordinates
# THIS WAS HACKED TOGETHER SO IT COULD VERY WELL BE BUGGY
def towardsXY(self, x, y):
"""returns the angle the turle would need to heading towards
to face the position x, y"""
xDir = x - self._position[0]
yDir = y - self._position[1]
try:
angle = math.atan(yDir / xDir) * 180 / math.pi
except ZeroDivisionError:
# should occur when xDir == 0.0 (divide by zero)
if yDir == 0.0:
return 0.0
elif yDir < 0:
return 90.0
else:
return 270.0
if xDir < 0:
angle = 180 - angle
elif yDir < 0:
angle = -1.0 * angle
elif yDir > 0:
angle = 360.0 - angle
return angle
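# A possible simplification (an untested sketch, not the original author's code):
# with screen coordinates where y grows downwards and headings measured in
# degrees, the same result can be obtained with a single atan2 call, e.g.
#
#   def towardsXY(self, x, y):
#       return math.degrees(math.atan2(-(y - self._position[1]),
#                                      x - self._position[0])) % 360.0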
def towards(self, t):
"""equivelant to towardsXY
returns the angle the turle would need to heading towards
to face the position x, y"""
return self.towardsXY(t._position[0], t._position[1])
def _radians(self):
self._degrees(2.0 * math.pi)
def forward(self, distance):
"""moves the turtle forward distance"""
angle = self._angle * self._invradian
self._goto(self._position[0] + distance * math.cos(angle),
self._position[1] - distance * math.sin(angle))
def backward(self, distance):
"""moves the turtle backward distance"""
self.forward(-distance)
def left(self, angle):
"""turns the turtle left angle degrees"""
if not self._drawingTurtle and self._dirty:
self._drawTurtle()
self._angle = (self._angle + angle) % self._fullcircle
#print self._angle
if not self._drawingTurtle and self._visible:
self._drawTurtle()
def right(self, angle):
"""turns the turtle right angle degrees"""
self.left(-angle)
def polygon(self, sides, distance):
"""draws a polygon with sides of length distance
(e.g. polygon(4, 100) draws a square)
polygon uses the forward and right methods to move the turtle"""
angle = 360.0 / sides
for i in range(sides):
self.forward(distance)
self.right(angle)
def cPolygon(self, sides, radius):
"""draws a polygon centered on the current turtle position
(e.g. cPolygon(4, 100) draws a square)
the radius determines the size of the polygon, so that
a circle with the same radius would intersect the vertices
cPolygon uses the forward and right methods to move the turtle"""
self.penUp()
self.forward(radius)
angle = 180.0 - (90.0 * (sides - 2) / sides)
self.right(angle) # must be the same as polygon turn above
self.penDown()
edge = 2 * radius * math.sin(math.pi / sides) # logo uses sin(180 / sides); Python uses radians
self.polygon(sides, edge)
self.left(angle) # turn back to where we started
self.penUp()
self.backward(radius)
self.penDown()
def showTurtle(self):
"""makes the turtle visible
the visibility of the turtle is independent of the turtle pen,
use penUp() and penDown() to control the pen state"""
if not self._visible:
self._drawTurtle()
self._visible = True
def hideTurtle(self):
"""hides the turtle
the visibility of the turtle is independent of the turtle pen,
use penUp() and penDown() to control the pen state"""
if self._dirty:
self._drawTurtle()
self._visible = False
def _drawTurtle(self):
""" override """
pass
def suspendOdometer(self):
"""suspends the turtle odometer"""
self._odometerOn = False
def resumeOdometer(self):
"""resumes the turtle odometer"""
self._odometerOn = True
def getOdometer(self):
"""returns the turtle odometer"""
return self._odometer
def resetOdometer(self):
"""resets the turtle odometer to 0.0"""
self._odometer = 0.0
def penUp(self):
"""raises the turtle pen, so no drawing will occur on subsequent
commands until the pen is lowered with penDown"""
self._drawing = False
def penDown(self):
"""lowers the turtle pen"""
self._drawing = True
"""
# Logo functions to implement
# see http://www.embry.com/rLogo/rLogoReference.html
# and http://www.embry.com/rLogo/Index.html for a working logo applet
# setpencolor will be handled by color()
setpencolor and setpc (red) (green) (blue)
- changes the pen color to the specified color.
(red), (green), and (blue) must range from 0 to 255.
setbackcolor and setbc (red) (green) (blue)
- changes the background color to the specified color.
(red), (green), and (blue) must range from 0 to 255
write
"""
def write(self, txt):
""" override """
pass
def width(self, width):
"""set the pen width"""
self._width = float(width)
def home(self):
"""resets the turtle back to its starting location"""
x0, y0 = self._origin
self._goto(x0, y0)
#self._goto(self._origin)
self._angle = 0.0
def _clear(self):
""" override """
pass
# need to enhance this to support the various
# color settings: colourName, rgb
""" valid named colors from wxWindows http://www.lpthe.jussieu.fr/~zeitlin/wxWindows/docs/wxwin64.htm#wxcolourdatabase
aquamarine, black, blue, blue violet, brown, cadet blue, coral, cornflower blue, cyan, dark grey,
dark green, dark olive green, dark orchid, dark slate blue, dark slate grey dark turquoise, dim
grey, firebrick, forest green, gold, goldenrod, grey, green, green yellow, indian red, khaki,
light blue, light grey, light steel blue, lime green, magenta, maroon, medium aquamarine, medium
blue, medium forest green, medium goldenrod, medium orchid, medium sea green, medium slate blue,
medium spring green, medium turquoise, medium violet red, midnight blue, navy, orange, orange
red, orchid, pale green, pink, plum, purple, red, salmon, sea green, sienna, sky blue, slate
blue, spring green, steel blue, tan, thistle, turquoise, violet, violet red, wheat, white,
yellow, yellow green
"""
def color(self, *args):
""" override """
pass
def setBackColor(self, *args):
""" override """
pass
def turtleDelay(self, s):
"""set the delay the turtle waits between drawing commands"""
self._turtleDelay = s
def reset(self):
""" this should be called after a subclass has done its reset """
width, height = self._size
self._origin = width/2.0, height/2.0
#print "width: %d, height: %d" % (width, height)
#print "_origin.x: %f, _origin.y: %f" % (self._origin[0], self._origin[1])
self._position = self._origin
self._angle = 0.0
# used for save and restore methods
self._savePosition = self._position
self._saveAngle = self._angle
# used for LIFO stack of turtle state
self._turtleStack = []
self._odometer = 0.0
# don't waste time tracking unless requested
self._odometerOn = False
# whether the pen is down
self._drawing = True
# the pen width
self._width = 1
# whether the turtle is visible, independent of the pen state
self._visible = False
# if _dirty then erase old turtle before drawing
self._dirty = False
# only true while drawing the turtle
self._drawingTurtle = False
# number of seconds to pause after drawing the turtle
self._turtleDelay = 0
# implicit save of pen state, penUp, pen state restore
# KEA 2004-05-09
# why the heck did I not require both x and y to be specified?
# is that the way Logo works?
def moveTo(self, x=None, y=None):
"""move the turtle to position x, y"""
if x is None:
x = self._position[0]
if y is None:
y = self._position[1]
drawingState = self._drawing
self._drawing = False
self._goto(x, y)
self._drawing = drawingState
# this might get an implicit save of pen state, penDown, pen state restore
def lineTo(self, x=None, y=None):
"""draw a line between the current turtle position and x, y
the turtle is moved to the new x, y position"""
if x is None:
x = self._position[0]
if y is None:
y = self._position[1]
self._goto(x, y)
def _goto(self, x1, y1):
""" override """
self._position = (float(x1), float(y1))
# could save more than position and heading below
# perhaps the turtle color, whether it is shown or not?
def save(self):
"""save the current turtle position and heading"""
self._savePosition = self._position
self._saveAngle = self._angle
def restore(self):
"""restore the turtle position and heading to the last saved state"""
self.moveTo(self._savePosition[0], self._savePosition[1])
self.setHeading(self._saveAngle)
def push(self):
"""push the current turtle position and heading onto a LIFO stack"""
self._turtleStack.append((self._position, self._angle))
def pop(self):
"""set the turtle position and heading to the state popped from a LIFO stack"""
try:
position, angle = self._turtleStack.pop()
self.moveTo(position[0], position[1])
self.setHeading(angle)
except IndexError:
pass
class BitmapTurtle(AbstractTurtle):
def __init__(self, canvas):
self.canvas = canvas
#self.dc.SetOptimization(1)
AbstractTurtle.__init__(self, canvas.size)
# illegal syntax, so I used None and the if tests instead
#def plot(self, x=self._position[0], y=self._position[1]):
def plot(self, x=None, y=None):
"""draws a point at x, y using the current pen color and width
note that the turtle position is not changed by plot"""
#self.dc.SetPen(self._color, self._width)
###self.dc.SetPen(self._pen)
self.canvas._bufImage.SetPen(self._pen)
if x is None:
x = self._position[0]
if y is None:
y = self._position[1]
# setting the color makes chaos1 over 3 times slower
# so we probably need to do a simpler if/then check
# prior to setting the color
# that means keeping the variable in a form that is
# simple to compare to the underlying dc canvas
self.canvas.drawPoint((round(x), round(y)))
# other variations
#self.dc.DrawLine(x, y, x+1, y+1)
#self._goto(x, y) # actually this isn't equivalent
# KEA 2005-03-26
# this is necessary to force a screen update on the Mac
if wx.Platform == '__WXMAC__' and self.canvas.autoRefresh:
#self.canvas.Refresh()
self.canvas.Update()
def line(self, x1, y1, x2, y2):
"""draws a line from x1, y1 to x2, y2 using the current
pen color and width
note that the turtle position is not changed by line"""
###self.dc.SetPen(self._pen)
self.canvas._bufImage.SetPen(self._pen)
###self.dc.DrawLine(x1, y1, x2, y2)
self.canvas.drawLine((round(x1), round(y1)), (round(x2), round(y2)))
# KEA 2005-03-26
# this is necessary to force a screen update on the Mac
if wx.Platform == '__WXMAC__' and self.canvas.autoRefresh:
#self.canvas.Refresh()
self.canvas.Update()
# probably replace this with a wxPython primitive for polygons
# so we can support filled polys...
# def polygon(self, sides, distance):
"""
# Logo functions to implement
# see http://www.embry.com/rLogo/rLogoReference.html
# and http://www.embry.com/rLogo/ for a working logo applet
# setpencolor will be handled by color()
setpencolor and setpc (red) (green) (blue)
- changes the pen color to the specified color.
(red), (green), and (blue) must range from 0 to 255.
setbackcolor and setbc (red) (green) (blue)
- changes the background color to the specified color.
(red), (green), and (blue) must range from 0 to 255
"""
def write(self, txt):
"""prints txt at the current turtle position"""
###self.dc.DrawText(txt, self._position[0], self._position[1])
self.canvas.drawText(txt, self._position)
def _clear(self):
###self.dc.Clear()
self.canvas.clear()
self._dirty = False
# KEA 2005-03-26
# this is necessary to force a screen update on the Mac
if wx.Platform == '__WXMAC__' and self.canvas.autoRefresh:
#self.canvas.Refresh()
self.canvas.Update()
# need to enhance this to support the various
# color settings: colourName, rgb
""" valid named colors from wxWindows http://www.lpthe.jussieu.fr/~zeitlin/wxWindows/docs/wxwin64.htm#wxcolourdatabase
aquamarine, black, blue, blue violet, brown, cadet blue, coral, cornflower blue, cyan, dark grey,
dark green, dark olive green, dark orchid, dark slate blue, dark slate grey dark turquoise, dim
grey, firebrick, forest green, gold, goldenrod, grey, green, green yellow, indian red, khaki,
light blue, light grey, light steel blue, lime green, magenta, maroon, medium aquamarine, medium
blue, medium forest green, medium goldenrod, medium orchid, medium sea green, medium slate blue,
medium spring green, medium turquoise, medium violet red, midnight blue, navy, orange, orange
red, orchid, pale green, pink, plum, purple, red, salmon, sea green, sienna, sky blue, slate
blue, spring green, steel blue, tan, thistle, turquoise, violet, violet red, wheat, white,
yellow, yellow green
"""
def width(self, w):
"""set the pen width"""
if self._dirty:
self._drawTurtle()
#self._width = float(width)
#self._width = w
self._pen.SetWidth(w)
if self._visible:
self._drawTurtle()
def color(self, *args):
"""set the foreground pen color
both (r, g, b) values and named colors are valid"""
if self._dirty:
self._drawTurtle()
# _color is actually the pen here
# I need to just keep a pen and change the elements
# of it
if len(args) == 1:
#self._color = wx.Pen(wx.NamedColour(args[0]))
#self._color = wx.NamedColour(args[0])
self._pen.SetColour(wx.NamedColour(args[0]))
else:
# self._color = wx.Pen(wx.Colour(args[0], args[1], args[2]))
#self._color = wx.Colour(args[0], args[1], args[2])
self._pen.SetColour(wx.Colour(args[0], args[1], args[2]))
if self._visible:
self._drawTurtle()
# http://www.lpthe.jussieu.fr/~zeitlin/wxWindows/docs/wxwin265.htm
# wxPen reference for pen width and style
# colourdb.py for actual rgb color values of named colors in wxPython
"""
print self._color.GetCap()
print self._color.GetColour()
#print self._color.GetDashes()
print self._color.GetJoin()
print self._color.GetStipple()
print self._color.GetStyle()
print self._color.GetWidth()
"""
""" example output
130
(0, 0, 255)
122
<C wxBitmap instance at _3331988_wxBitmap_p>
100
1
"""
#print self._color
# self.dc.SetPen(wx.Pen(wx.NamedColour(args[0])))
# the background is shared, not specific to each turtle
def setBackColor(self, *args):
"""set the background pen color
both (r, g, b) values and named colors are valid"""
if len(args) == 1:
self.canvas.backgroundColor = args[0]
else:
self.canvas.backgroundColor = (args[0], args[1], args[2])
# non-optimized turtle
# the turtle rotates from its base rather than a pivot point
# in the "center" of the turtle shape
#
# as currently implemented drawTurtle is designed to erase
# itself on a subsequent call to drawTurtle
# in addition, this version does not use offscreen bitmaps
# so an inversion is done on the pixels underneath the turtle
# rather than showing the actual color of the current turtle
def _drawTurtle(self):
"""private method for drawing the turtle when showTurtle()
has been called to make the turtle visible"""
#if not self._drawingTurtle:
self._dirty = not self._dirty
self._drawingTurtle = True
drawingState = self._drawing
currentPos = self._position
currentAngle = self._angle
#self._pen.SetCap(wx.CAP_PROJECTING)
#self._pen.SetJoin(wx.JOIN_BEVEL)
###self.dc.SetPen(self._pen)
self.canvas._bufImage.SetPen(self._pen)
###self.dc.SetLogicalFunction(wx.INVERT)
self.canvas._bufImage.SetLogicalFunction(wx.INVERT)
a = 30 # make a larger to get a bigger turtle
b = 2 * a * math.tan(15 * self._invradian)
c = a / math.cos(15 * self._invradian)
self.pu(); self.forward(a); self.pd()
self.left(165)
self.forward(c)
self.left(105)
self.forward(b)
self.left(105)
self.forward(c)
#self._pen.SetJoin(wx.JOIN_ROUND)
#self._pen.SetCap(wx.CAP_ROUND)
###self.dc.SetLogicalFunction(wx.COPY)
self.canvas._bufImage.SetLogicalFunction(wx.COPY)
self._angle = currentAngle
self._drawing = drawingState
self._position = currentPos
self._drawingTurtle = False
if self._dirty and self._turtleDelay > 0:
time.sleep(self._turtleDelay)
def reset(self):
"""reset the turtle to its initial state"""
self._size = self.canvas.size
self._color = wx.Pen(wx.NamedColour("black"))
self._pen = wx.Pen('black', 1, wx.SOLID)
self.setBackColor('white')
AbstractTurtle.reset(self)
def _goto(self, x1, y1):
if not self._drawingTurtle:
if self._odometerOn:
self._odometer += self.distanceXY(x1, y1)
if self._dirty:
# this is necessary to avoid an endless loop as drawTurtle uses _goto
#self._dirty = 0
#visible = self._visible
#self._visible = 0
self._drawTurtle()
#self._visible = visible
x0, y0 = start = self._position
self._position = (float(x1), float(y1))
if self._drawing:
"""
if self._tracing:
dx = float(x1 - x0)
dy = float(y1 - y0)
distance = hypot(dx, dy)
nhops = int(distance)
#print "tracing %d %d %d %d" % (x0, y0, x0, y0)
#self.dc.SetPen(self._color, self._width)
self.dc.SetPen(self._pen)
self.dc.DrawLine(x0, y0, x1, y1)
else:
"""
#print "%d %d %d %d" % (x0, y0, x0, y0)
#self.dc.SetPen(self._color, self._width)
if not self._drawingTurtle:
###self.dc.SetPen(self._pen)
self.canvas._bufImage.SetPen(self._pen)
###self.dc.DrawLine(x0, y0, x1, y1)
self.canvas.drawLine((round(x0), round(y0)), (round(x1), round(y1)))
if not self._drawingTurtle:
# KEA 2005-03-26
# this is necessary to force a screen update on the Mac
if wx.Platform == '__WXMAC__' and self.canvas.autoRefresh:
self.canvas.Update()
if self._visible:
# this is necessary to avoid an endless loop as drawTurtle uses _goto
#self._visible = 0
#self._dirty = 1
self._drawTurtle()
#self._visible = 1
|
import tkinter.messagebox
import tkinter.filedialog
from astropy.io import fits
import numpy
from operator import itemgetter
class Info():
"""Info-class for pop-up messages"""
@staticmethod
def notimplemented():
tkinter.messagebox.showinfo("Not implemented", "Not implemented yet")
@staticmethod
def about():
tkinter.messagebox.showinfo("About", "About")
@staticmethod
def askyesno(tit, tex):
return tkinter.messagebox.askyesno(tit, tex)
@staticmethod
def message(msg1, msg2):
tkinter.messagebox.showinfo(msg1, msg2)
@staticmethod
def license():
pass
def loadfile():
"""loads file. Has build-in file format recognize system
returns tuple(directory, text)"""
try:
x = tkinter.filedialog.askopenfilename()
except TypeError:
return
if not x:
return
y = x.split(".")
if y[-1] == "fits":
# TODO: this conversion is crude and hard-coded. Create a new function for it
# and add proper formatting etc.
hdulist = fits.open(x)
tbdata = hdulist[1].data
a = tbdata.field('TMID')/86400.0 + 2453005.5
b = 15 - 2.5*numpy.log10(tbdata.field('TAMFLUX2'))
out = ""
for i in range(len(a)):
out += str(a[i]) + " " * 5 + str(b[i]) + "\n"
return (x, out)
else:
file = open(x)
y = file.read()
file.close()
s = (x, y)
return s
def quicksavefile(directory, text, format=".out"):
"""saves file in given directory in fiven format"""
print(text)
print(directory)
directory = directory.split(".")
del directory[-1]
directory.append(format)
s = "".join(directory)
file = open(s, "w")
file.write(text)
file.close()
def remove_empty(data):
"""Removes empty items from list"""
out = []
for item in data:
if item == '':
continue
out.append(item)
return out
def cut_data(data):
"""cuts two-row data into two seperate lists. Items are formatted as float"""
out = [[], []]
data = data.split("\n")
for line in data:
line = line.split(" ")
line = remove_empty(line)
try:
out[0].append(float(line[0]))
out[1].append(float(line[1]))
except IndexError:
pass
file = open("test.txt", "w")
for i in out[1]: # DELETE
file.write(str(i))
file.write("\n")
file.close()
return out
def savefile(text):
"""opens tkinter filedialog to save file"""
file = tkinter.filedialog.asksaveasfile(mode='w', defaultextension='.txt')
if not file:
return
file.write(text)
file.close()
def text_to_list(text):
"""creates to-plot-list from string"""
text = text.split("\n")
mylist = []
a, b = [], []
x = " "
for el in text:
temp = []
el = el.split()
for pos in el:
if len(pos) == 0:
continue
if x not in pos:
temp.append(pos)
try:
a.append(temp[0])
b.append(temp[1])
except IndexError:
pass
a = del_empty(a)
b = del_empty(b)
mylist.append(a)
mylist.append(b)
return mylist
def del_empty(list):
"""deletes empty elements from a list"""
return [item for item in list if len(item) != 0]
def del_empty_space(list):
"""deletes elements that contain a space"""
return [item for item in list if " " not in item]
def clear_list(list):
""" clears "" and " " in list """
for x in range(len(list)):
try:
list.remove("")
except ValueError:
pass
try:
list.remove(" ")
except ValueError:
pass
return list
def get_without(list, char="#"):
"""returns list with elements without char"""
s = []
for line in list:
if char not in line:
s.append(line)
return s
def myformat(table):
"""creates str from table and formats it"""
m = 0
table = sorted(table, key=itemgetter(0))
for t in table:
t = str(t)
if len(t[0]) > m:
m = len(t[0])
m += 10
fstr = "{0:}" + m*" " + "{1:}"
s = ""
for x in table:
try:
a = float(x[0])
b = float(x[1])
s += "{0:.5f}{1:{width}}".format(a, b, width=m) + "\n"
except IndexError:
pass
return s
"""
out = ""
for pair in table:
out += str(pair[0]) + 5*" " + str(pair[1]) + "\n"
return out"""
def average(data_list):
return sum(data_list) / len(data_list)
if __name__ == "__main__":
pass
|
class Log(object):
def __init__(self, path, append=False):
self.f_log = open(path,"w" if not append else "a+")
def log(self, *args):
self.f_log.write(*args)
self.f_log.write("\n")
self.f_log.flush()
def close(self):
self.f_log.close()
def __del__(self):
self.f_log.close()
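# A small usage sketch (illustrative only; the path is a placeholder):
#
#   log = Log("run.log")
#   log.log("starting run")
#   log.close()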
|
#
# Protein Engineering Analysis Tool Structure Analysis (PEATSA)
# Copyright (C) 2010 Michael Johnston & Jens Erik Nielsen
#
# Author: Michael Johnston
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
'''Contains classes and functions for processing a submission from the WebApp's main web page'''
import subprocess, os, urlparse, urllib2, string, time, StringIO, sys
import PEATSA.Core as Core
import UtilityFunctions, Exceptions, Data
def GetPDB(pdbCode, dict={}):
'''Fetches a pdb file from the Protein Data Bank. Returns a stream to the pdb contents.
If dict is provided it contains the key 'stream' on success whose value is the stream.
On failure it contains two keys, 'error' and 'description'.'''
url = "http://www.rcsb.org//pdb/download/downloadFile.do?fileFormat=pdb&compression=NO&structureId=" + pdbCode
try:
#URLLib API changed in 2.6
if sys.version_info[:2] == (2,5):
import socket
socket.setdefaulttimeout(10)
stream = urllib2.urlopen(url)
else:
stream = urllib2.urlopen(url, None, timeout=10)
#Check for an error
info = stream.info()
status = info.status
if status != "":
stream = None
dict['error'] = 'Error status %s' % str(status)
elif not info.has_key('content-disposition'):
stream = None
dict['error'] = 'Request for %s returned nothing' % pdbCode
else:
lines = stream.readlines()
string = "".join(lines)
stream = StringIO.StringIO(string)
dict['stream'] = stream
except urllib2.HTTPError, data:
dict['error'] = 'Unable to retrieve structure for pdb code %s from the Protein Data Bank. ' % pdbCode
dict['description'] = 'Reason: %s' % data
stream = None
except urllib2.URLError, data:
dict['error'] = 'Unable to retrieve structure for pdb code %s from the Protein Data Bank. ' % pdbCode
dict['description'] = 'Reason: %s' % data.reason
stream = None
except Exception, data:
dict['error'] = 'Unable to retrieve structure for pdb code %s from the Protein Data Bank. ' % pdbCode
dict['description'] = 'Reason: Encountered unexpected exception %s' % data
stream = None
return stream
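# A minimal usage sketch (the pdb code below is only an example):
#
#   info = {}
#   stream = GetPDB('1crn', dict=info)
#   if stream is None:
#       print info['error'], info.get('description', '')
#   else:
#       pdbContents = stream.read()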
def CreateMutationString(mutationData, webStorageDirectory, jobInputDirectory, job):
'''Creates the mutation part of the command line arguments based on the mutationData dictionary'''
mutationType = mutationData['Type']
mutationString = "--%s=%s"
if mutationType == 'mutationList':
#Write out the file to the web storage directory
filename = os.path.join(webStorageDirectory, 'mutationList')
stream = open(filename, 'w+')
stream.write(mutationData['Data'].read())
try:
#Fixme - catch mutation list exception
stream.seek(0)
mutationList = Core.Data.mutationListFileFromStream(stream)
job.setMutationListFile(mutationList)
stream.close()
except:
pass
#Create the string
#Note we must give the path to the mutationFile as it will
#be when the job is run
filename = os.path.join(jobInputDirectory, 'mutationList')
mutationString = mutationString % (mutationType, filename)
elif mutationType == 'mutation':
mutationString = mutationString % (mutationType, mutationData['Data'])
job.setMutation(mutationData['Data'])
return mutationString
def CreateCalculationString(calculations, ligandFile):
'''Creates the calculation parts of the command line arguments based on the calculations list'''
calculationString = ""
if calculations.count('scan') != 0:
calculationString = calculationString + "--scan "
if calculations.count('stability') != 0:
calculationString = calculationString + " --stability"
if ligandFile is not None and calculations.count('binding') != 0:
calculationString = calculationString + " --ligand=%s" % ligandFile
return calculationString
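# For illustration (hypothetical arguments, not from the original module):
#
#   CreateCalculationString(['scan', 'stability', 'binding'], 'ligand.mol2')
#   # -> '--scan  --stability --ligand=ligand.mol2'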
def ConstructErrorURL(domain="PDT.UnknownDomain",
description="Unexpected Error",
detailedDescription="Unknown error while processing job",
recoverySuggestion="Please contact the developers with details of what you were doing"):
'''Constructs an URL for the WebApp's error page
Automatically determines the server name and port from os.environ
Parameters
domain - The domain of the error - Indicates what part of the program failed
description - A short string which very briefly describes what happened
detailedDescription - A longer string elaborating on the error
recoverySuggestion - A string explaining what to do, if known
Return
A string containing an URL'''
#Add error data as GET data - Possibly a better way?
#e.g. generate html here?
dataDict = {"domain":domain,
"description":description,
"detailedDescription":detailedDescription,
"recoverySuggestion":recoverySuggestion}
data = ["%s=%s" % (key, dataDict[key]) for key in dataDict.keys()]
query = "&".join(data)
location = os.environ['SERVER_NAME'] + ":" + os.environ['SERVER_PORT']
components = ("http", location, "PEATSA/Pages/Error.php", "", query, "")
return urlparse.urlunparse(components)
def ProcessPDBFilename(filename):
'''Processes the input string to be of the form "PDBID.pdb"
This is done in the following way:
- filename is split into base + extension
- Trailing whitespace and underlines are removed
- All punctuation (except for underlines) is removed
- All spaces are replaced with underlines
- The base is lower-cased and appended with .pdb
If the file does not have the '.pdb' extension (in any mixture of cases)
an empty string is returned'''
filename = os.path.basename(filename)
extension = os.path.splitext(filename)[1]
if extension.lower() != ".pdb":
return ""
pdbId = os.path.splitext(filename)[0]
#Strip stuff
pdbId = pdbId.strip()
pdbId = pdbId.strip("_")
#Replace underlines with spaces
#This is so these aren't removed in the next step
pdbId = pdbId.replace("_", " ")
#Remove all punctuation characters
for character in string.punctuation:
pdbId = pdbId.replace(character, "")
#Put the underlines back - this also replaces any
#preexisting spaces with underlines
pdbId = pdbId.replace(" ", "_")
pdbId = pdbId.lower()
return pdbId + '.pdb'
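# For illustration (hypothetical filenames, not from the original module):
#
#   ProcessPDBFilename('My Protein-1.PDB')   # -> 'my_protein1.pdb'
#   ProcessPDBFilename('structure.txt')      # -> '' (extension is not .pdb)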
class JobConstructor:
'''Set up a WebApp job using data submitted by the user.
This includes creating output directories, placing files in the
correct location, and forming the command line to be executed.
Many of the options controlling a JobConstructor instance's behaviour
are given in the WebApp's configuration file.
Principal attributes
- options - The web app options, read from the configuration file
- job - A WebApp.Data.Job instance representing the job
- connection - A connection to the webapp db
- runString - The command line for the job'''
def __init__(self, formData, construct=False):
'''Constructor
Parameters:
formData - A FormData instance
construct - If True this method calls construct immediately,
otherwise construct must be called at a later stage.
This allows parameters to be modified.'''
self.formData = formData
self.runString = None
self.errorData = None
self.job = None
self.jobManager = None
try:
self.options = UtilityFunctions.DefaultWebAppConfiguration()
self.connection = UtilityFunctions.ConnectionFromConfiguration(self.options)
self.webServerDirectory = self.options.get('WEB APPLICATION', 'webServerDirectory')
self.uploadLimit = int(self.options.get('WEB APPLICATION', 'uploadLimit'))
except Core.Exceptions.EnvironmentError, data:
self._setErrorData(data)
return
#Check if the uploaded files exceed the uploadLimit
if self._checkUploadedFiles():
#Connect to the db
self.jobManager = Data.JobManager(self.connection)
if construct is True:
self.construct()
def __del__(self):
self.connection.close()
def __str__(self):
if self.runString is None:
return "JobConstructor - Job not created yet"
else:
return "JobConstructor - Run string:\n\t%s" % self.runString
def _checkUploadedFiles(self):
'''Checks all the uploaded files to see if they are within limits'''
#If a PKA code wasn't provided a
#pdb file must be present - check the file size
if not self.formData.isPKACodePresent():
content = self.formData.pdbFileStream().read()
if len(content) > self.uploadLimit:
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error with pdb.",
"detailedDescription":"Filesize exceeds size limit (%.2lfMB)" % (self.uploadLimit/(1024.0*1024.0)),
"recoverySuggestion":"Unfortunately we only can accept files under this limit."}
if len(content) == 0:
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error with pdb.",
"detailedDescription":"No data in file",
"recoverySuggestion":"Check that the correct file was provided."}
if self.errorData is None and self.formData.isLigandPresent():
content = self.formData.ligandFileStream().read()
if len(content) > self.uploadLimit:
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error with uploaded ligand.",
"detailedDescription":"Filesize exceeds upload limit (%.2lfMB)" % (self.uploadLimit/(1024.0*1024.0)),
"recoverySuggestion":"Unfortunately we only can accept files under this limit."}
if self.errorData is None and self.formData.isMutationList():
content = self.formData.mutationListFileStream().read()
if len(content) > self.uploadLimit:
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error with uploaded ligand.",
"detailedDescription":"Filesize exceeds upload limit (%.2lfMB)" % (self.uploadLimit/(1024.0*1024.0)),
"recoverySuggestion":"Unfortunately we only can accept files under this limit."}
if self.errorData is not None:
return False
else:
return True
def _setErrorData(self, data):
'''Convenience method for creating errorData due to a configuration error.
The exact reason for the error is provided by data.'''
self.errorData = {"domain":"PDT.ConfigurationDomain",
"description":"Error initialising job submission environment.",
"detailedDescription":data,
"recoverySuggestion":"This is a bug - please contact the developers."}
def _writeStringToFile(self, string, filename):
'''Convenience method for writing to a file'''
stream = open(filename, "w+")
stream.write(string)
stream.close()
def construct(self):
'''Performs all necessary steps for setting up a WebApp job based on the data submitted by a user via the WebApp main page.
This basically involves three steps.
- Creation of an entry for the Job in the WebApp database
- Outputting the job files uploaded by the user to the correct locations
- Assembling the command line string that will be executed when runBackend() is called.
Check the result of errorData() to see if there were any problems with construction'''
#For each job run an instance of the Job class is created using the JobManager object.
#This creates the necessary entries in the MySQL database.
#The Job instance contains info on the job and allows the data stored for the job to be modified.
self.job = self.jobManager.createJob(self.formData.pdbId(), self.formData.calculations())
#Create the input/output directory names
try:
jobOutputDirectory = self.options.get('WEB APPLICATION', 'jobOutputDirectory')
jobInputDirectory = self.options.get('WEB APPLICATION', 'jobInputDirectory')
pKaDataDirectory = self.options.get('WEB APPLICATION', 'pKaDataDirectory')
backendExecutable = self.options.get('WEB APPLICATION', 'launchScript')
except Core.Exceptions.EnvironmentError, data:
self._setErrorData(data)
return
#Get the various IO directories that will be used by the job
#If the job is to be run on the local host then jobInputDirectory must
#be the same as webServerDirectory
webStorageDir = os.path.join(self.webServerDirectory, self.job.identification)
outputDir = os.path.join(jobOutputDirectory, self.job.identification + '_Out')
if self.formData.isPKACodePresent():
workingDir = os.path.join(pKaDataDirectory, self.formData.pKaCode())
else:
workingDir = os.path.join(jobOutputDirectory, self.job.identification + '_Work')
inputDir = os.path.join(jobInputDirectory, self.job.identification + '_Out')
os.mkdir(webStorageDir)
#If this is not a delta pKa calculation the pdb is not already on disk,
#so write the uploaded/downloaded one to a file
if not self.formData.isPKACodePresent():
#To be deprecated
filename = os.path.join(webStorageDir, '%s.pdb' % self.formData.pdbId())
stream = self.formData.pdbFileStream()
self._writeStringToFile(stream.read(), filename)
pdbFilename = self.formData.pdbFilename()
#Add structure to db
stream.seek(0)
self.job.setStructure(stream.read())
#Check the provided pdb
error = False
try:
structure = self.job.protoolStructure()
except Exceptions.FileFormatError, data:
error = True
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in with submission.",
"detailedDescription": "There is an error in the format of the pdb file",
"recoverySuggestion": 'Check the file to ensure its format is correct'}
self.job.setError(description=self.errorData['description'],
detailedDescription=self.errorData['detailedDescription'])
self.job.setState('Finished')
if error is True:
return
if structure.hasMissingMainChainAtoms():
missing = structure.hasMissingMainChainAtoms()
missing = ", ".join(missing)
suggestion = "The residues with missing atoms are: %s.<br>" % missing
suggestion = suggestion + "PEAT-SA requires that all main-chain heavy atoms are present in the structure.<br>"
suggestion = suggestion + "You could try submitting a fragment of the structure that meets this requirement."
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted pdb structure.",
"detailedDescription":"The supplied structure is missing main-chain heavy atoms",
"recoverySuggestion": suggestion}
self.job.setError(description=self.errorData['description'],
detailedDescription=self.errorData['detailedDescription'])
self.job.setState('Finished')
return
elif structure.hasChainBreak():
suggestion = "PEAT-SA requires that all chains in submitted structures are complete.<br>"
suggestion = suggestion + "You could try submitting a fragment of the structure that meets this requirement."
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted pdb structure.",
"detailedDescription":"The supplied structure contains at least one chain-break.",
"recoverySuggestion": suggestion}
self.job.setError(description=self.errorData['description'],
detailedDescription=self.errorData['detailedDescription'])
self.job.setState('Finished')
return
else:
pdbFilename = os.path.join(workingDir, self.formData.pdbFilename())
if self.formData.isLigandPresent():
filename = os.path.join(webStorageDir, '%s' % self.formData.ligandFilename())
stream = self.formData.ligandFileStream()
self._writeStringToFile(stream.read(), filename)
stream.seek(0)
self.job.setLigand(stream.read())
#Add email address - This could be just 'Unknown' if none was provided
self.job.setEmail(self.formData.email())
#Create the mutation string.
#This also writes out the mutation file if necessary
mutationString = CreateMutationString(self.formData.mutationData, webStorageDir, inputDir, self.job)
#if the mutationData is a mutation-list check there are actually some mutations in it
if self.job.isMutationList():
mutationListFile = self.job.mutationListFile()
if mutationListFile.numberOfMutants() == 0:
suggestion = "Return to the submission page and enter some mutation codes in the text-box or upload a file"
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted mutation list.",
"detailedDescription":"The supplied mutation list contains no mutation codes.",
"recoverySuggestion": suggestion}
self.job.setError(description=self.errorData['description'],
detailedDescription=self.errorData['detailedDescription'])
self.job.setState('Finished')
return
calculationString = CreateCalculationString(self.formData.calculations(), self.formData.ligandFilename())
#Create the run string
self.runString = "%s -p %s -w %s -o %s -j %s -v %s %s" % (backendExecutable, os.path.join(inputDir, pdbFilename),
workingDir, outputDir, self.job.identification,
calculationString, mutationString)
if self.formData.isIonisableGroupsPresent():
self.runString += " --ionisableGroups=%s" % self.formData.ionisableGroups()
def runBackend(self):
'''Executes the command string for the job via Popen
Returns:
An URL which specifies a page giving information about the Job
or None if construct() has not been called.'''
if self.runString == None:
return
try:
#Put selected calculations into state Queued regardless of whether the following works or not.
#This avoids a possible race condition over which process modifies the job's state first
#if the backend is launched quickly
states = self.job.calculationStates()
for calculation in states.keys():
if states[calculation] != "NotSelected":
self.job.setCalculationState(calculation, "Queued")
#Start the job running
#FIXME - Wont really be able to use files for a log
standardOut = open(os.path.join(self.webServerDirectory, "PDTWebRuns.log"), "a")
standardOut.write("\n----------------------------------------------------------\n")
standardOut.write("\nRunning Job %s\nDate %s\nWebScript command line %s\n\n" % (self.job.identification, self.job.date, self.runString))
standardOut.flush()
standardError = open(os.path.join(self.webServerDirectory, "PDTWebErrors.log"), "a")
process = subprocess.Popen(self.runString, shell=True, stdout=standardOut, stderr=standardError)
#Wait until the job is running
time.sleep(1.0)
process.poll()
if process.returncode != None and process.returncode != 0:
string = "Unable to launch job - launch script exited with error %d" % process.returncode
standardError.write(string)
raise Exceptions.SubmissionException, string
standardOut.close()
standardError.close()
#Construct the url for the processing job page
#Pass on the information about what is being calculated as well
#The elements here are scheme, location, hierarchical path, parameters, query, fragment
location = os.environ['SERVER_NAME'] + ":" + os.environ['SERVER_PORT']
components = ("http", location, "PEATSA/Pages/Results.php", "", "jobId=%s" % self.job.identification, "")
resultURL = urlparse.urlunparse(components)
except BaseException, data:
if hasattr(data, "child_traceback"):
errorString = "Exception %s. \n Child traceback %s" % (data, data.child_traceback)
else:
errorString = "Exception - %s" % data
#Delete job information from the db if it exists
if self.job is not None:
self.jobManager.deleteJob(self.job)
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error when attempting to run job.",
"detailedDescription":errorString,
"recoverySuggestion":"This is a bug - please contact the developers."}
resultURL = self.errorURL()
return resultURL
def errorURL(self):
'''Returns an URL for the WebApp error page if there was an error with the form data.
On loading this URL the user will be presented with information regarding what went wrong.
If there was no problem this method returns None'''
if self.errorData is None:
return None
return ConstructErrorURL(domain=self.errorData["domain"],
description=self.errorData["description"],
detailedDescription=self.errorData["detailedDescription"],
recoverySuggestion=self.errorData["recoverySuggestion"])
def error(self):
'''See FormData.errorURL docs for information'''
return self.errorData
class FormData:
'''Class representing the form data submitted from the WebApp main page'''
def __init__(self, formData):
'''Initialises the FormData class.
formData must be an instance of the cgi.FieldStorage class'''
self.errorData = None
self.formData = formData
self.pdbStream = None
self._processMutationData()
self._checkSubmittedData()
if self.errorData is None:
self._setPDBStream()
def _setPDBStream(self):
'''Assigns a stream for the pdb data to the pdbStream ivar.
If the stream cannot be created it sets an error.
Note, a stream is only created if a delta-pKa calculation is not requested.
In this case the pdb file to be used is already available'''
if self.isPDBFilePresent():
self.pdbStream = self.formData["pdbFile"].file
self.pdbStream.seek(0)
elif self.isPDBCodePresent():
data = {}
self.pdbStream = GetPDB(self.pdbId(), dict=data)
if data.has_key('error'):
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":data['error'],
"detailedDescription": data['description'],
"recoverySuggestion":"Check the supplied code is valid"}
else:
self.pdbStream.seek(0)
def _checkSubmittedData(self):
'''Performs a series of checks on the submitted data'''
if not self.isPDBFilePresent() and not (self.isPDBCodePresent() or self.isPKACodePresent()):
#No pdb supplied - error
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"No PDB file was uploaded and no PDB code was provided. Hard to do a calculation on nothing!",
"recoverySuggestion":"Head back to the main page and upload a PDB or provide a PDB code."}
elif not self.isCalculationDataPresent():
#No calculation specified
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"At least one calculation type must be selected.",
"recoverySuggestion":"Head back to the main page and choose some calculations."}
elif not self.isMutationDataPresent():
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"The mutations to perform must be specified.",
"recoverySuggestion":"Head back to the main page and choose some mutations or upload a mutation file."}
elif self.calculations().count('binding') == 1 and not self.isLigandPresent():
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"Binding selected but no ligand provided.",
"recoverySuggestion":"Head back to the main page and upload a ligand."}
elif self.calculations().count('scan') == 1 and not self.isPKACodePresent():
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"pKa Scan selected but no pKa calculation code provided.",
"recoverySuggestion":"In order to perform a scan you must have previously completed a pKa calculation."}
elif self.calculations().count('scan') == 0 and self.isPKACodePresent():
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"pKa calculation code provided but pKa scan not selected.",
"recoverySuggestion":"Please select delta pKa option if you want to perform a scan."}
		#If there's been an error at this stage return now
if self.errorData is not None:
return
#Check the submitted PDB filename
# In order to standardize the names of the directories
# (so each can be identified with a specific id) the following operations are required
# - The filename must be of the form PDBID.pdb
# - The PDBID must be all lowercase
		# - The PDBID must not contain any punctuation marks except for underscores
# - No spaces allowed
if self.isPDBFilePresent():
self.standardizedPDBFilename = ProcessPDBFilename(self.formData["pdbFile"].filename)
elif self.isPDBCodePresent():
self.standardizedPDBFilename = ProcessPDBFilename(self.formData.getvalue("pdbCode") + ".pdb")
elif self.isPKACodePresent():
self.standardizedPDBFilename = self.pKaCode() + ".pdb"
#Check that after the processing pdbFilename is not just an extension
if self.standardizedPDBFilename == "" or self.standardizedPDBFilename[0] == ".":
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"The filename of the uploaded pdb is invalid.",
"recoverySuggestion":"Go back to the main page and check the naming guidelines for uploaded pdb files."}
#Check the ligand file extension is mol2 (if it exists).
if self.isLigandPresent():
components = os.path.splitext(self.ligandFilename())
if len(components) != 2:
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"The filename of the uploaded ligand is invalid (missing extension).",
"recoverySuggestion":"Go back to the main page and check the naming guidelines for uploaded ligand files."}
elif components[1].lower() != ".mol2":
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"The filename of the uploaded ligand is invalid - %s." % components[1],
"recoverySuggestion":"The filename extension must be mol2"}
def _processMutationData(self):
self.mutationData = {"Type":"Unknown", "Data":"Unknown"}
if self.isMutationDataPresent():
#Process the mutations
if self.isMutationList():
self.mutationData['Type']='mutationList'
#See if they uploaded a file or typed one in
if self.formData["mutationFile"].filename:
#The file element is a stream
self.mutationData['Data']=self.formData["mutationFile"].file
else:
list = self.formData.getvalue('mutationListArea')
#Create a file-like stream object for the list
self.mutationData['Data'] = StringIO.StringIO(list)
else:
				#Must be Residue Scan since otherwise we wouldn't be here
self.mutationData['Type']='mutation'
self.mutationData['Data']=self.formData["residue"].value
def error(self):
'''Returns a dictionary containing details on any errors with the form data
The dictionary has the following keys
- domain
- description
- detailedDescription
- recoverySuggestion
The method returns None if there is no problem with the form data'''
return self.errorData
def errorURL(self):
'''Returns an URL for the WebApp error page if there was an error with the form data.
On loading this URL the user will be presented with information regarding what went wrong.
		If there was no problem this method returns None'''
if self.errorData is None:
return None
return ConstructErrorURL(domain=self.errorData["domain"],
description=self.errorData["description"],
detailedDescription=self.errorData["detailedDescription"],
recoverySuggestion=self.errorData["recoverySuggestion"])
def isPDBFilePresent(self):
'''Returns True if the form data contains a PDB file'''
pdbProvided = False
if self.formData.has_key("pdbFile"):
if self.formData["pdbFile"].filename != "":
pdbProvided = True;
return pdbProvided
def isLigandPresent(self):
		'''Returns True if formData contains a ligand file'''
provided = False
if self.formData.has_key("ligandFile"):
if self.formData["ligandFile"].filename != "":
provided = True;
return provided
def isCodePresent(self):
'''Returns True if the code field contains text'''
codeProvided = False
if self.formData.getvalue("pdbCode") != "":
codeProvided = True
return codeProvided
def isPDBCodePresent(self):
'''Returns True if the form data contains a PDB code.
		The form data contains a PDB code if there is an entry in the
code field and "Scan" is not selected'''
pdbProvided = False
if self.isCodePresent() and not self.isDeltaPKACalculationRequested():
pdbProvided = True
return pdbProvided
def isPKACodePresent(self):
'''Returns True of formData contain a pKa code
The form data contains a pKa code if "Scan" is selected
		and there is an entry in the code field'''
pKaProvided = False
if self.isCodePresent() and self.isDeltaPKACalculationRequested():
pKaProvided = True
return pKaProvided
def isDeltaPKACalculationRequested(self):
'''Returns True if a delta pKa calculation was requested'''
provided = False
if self.calculations().count('scan') == 1:
provided = True
return provided
def isMutationDataPresent(self):
'''Returns True if formData contains mutation information'''
mutationProvided = False
mutationChoice = self.formData.getlist("mutation")
if len(mutationChoice) == 1:
mutationProvided = True
return mutationProvided
def isCalculationDataPresent(self):
'''Returns True if data on what calculations to perform is present'''
present = False
if len(self.calculations()) != 0:
present = True
return present
def isIonisableGroupsPresent(self):
'''Returns True if ionisable groups were specified AND a dpKa calculation was requested'''
present = False
if self.isDeltaPKACalculationRequested():
string = self.ionisableGroups()
if string != "" and string.lower() != "all":
present = True
return present
def isMutationList(self):
'''Returns True if the formData contains a mutationList file'''
mutationList = False
mutationChoice = self.formData.getlist("mutation")
if mutationChoice.count('mutationFile') != 0:
mutationList=True
return mutationList
def calculations(self):
'''Returns a list containing the names of the calculations requested'''
return self.formData.getlist("calculation")
def pdbFilename(self):
'''Returns the filename of the pdb - note this does not include a path'''
return self.standardizedPDBFilename
def pdbId(self):
return os.path.splitext(self.pdbFilename())[0]
def pdbFileStream(self):
'''Returns an opened stream to the pdbFile'''
self.pdbStream.seek(0)
return self.pdbStream
def ligandFilename(self):
filename = self.formData["ligandFile"].filename
filename = os.path.basename(filename)
return self.formData["ligandFile"].filename
def ligandFileStream(self):
'''Returns an opened stream to the ligand file'''
stream = self.formData["ligandFile"].file
stream.seek(0)
return stream
def mutationListFileStream(self):
'''Returns an opened stream to the mutationList file'''
if self.mutationData['Type'] == 'mutationList':
stream = self.mutationData["Data"]
stream.seek(0)
return stream
def pKaCode(self):
'''Returns the pKa code.
If none is present returns None.
A pKa code is deemed present if the code field is filled
and a delta-pKa calculation is requested'''
retval = None
if self.isPKACodePresent():
retval = self.formData.getvalue("pdbCode")
return retval
def email(self):
'''Returns the email address if their is one'''
retval = self.formData.getvalue("email")
if retval == "":
retval = "Unknown"
return retval
def ionisableGroups(self):
return self.formData.getvalue("ionisableGroups")
|
from django.apps import AppConfig
class WagtailLocalizeRwsLanguageCloudAppConfig(AppConfig):
label = "wagtail_localize_rws_languagecloud"
name = "wagtail_localize_rws_languagecloud"
verbose_name = "Wagtail Localize RWS LanguageCloud"
default_auto_field = "django.db.models.AutoField"
def ready(self):
import wagtail_localize_rws_languagecloud.checks # noqa: F401
|
class bit: pass
class bit2: pass
class bit3: pass
class bit4: pass
class bit5: pass
class bit6: pass
class bit7: pass
class bit8: pass
class bit9: pass
class bit10: pass
class bit11: pass
class bit12: pass
class bit13: pass
class bit14: pass
class bit15: pass
class bit16: pass
class bit17: pass
class bit18: pass
class bit19: pass
class bit20: pass
class bit21: pass
class bit22: pass
class bit23: pass
class bit24: pass
class bit25: pass
class bit26: pass
class bit27: pass
class bit28: pass
class bit29: pass
class bit30: pass
class bit31: pass
class bit32: pass
class bit33: pass
class bit34: pass
class bit35: pass
class bit36: pass
class bit37: pass
class bit38: pass
class bit39: pass
class bit40: pass
class bit41: pass
class bit42: pass
class bit43: pass
class bit44: pass
class bit45: pass
class bit46: pass
class bit47: pass
class bit48: pass
class bit49: pass
class bit50: pass
class bit51: pass
class bit52: pass
class bit53: pass
class bit54: pass
class bit55: pass
class bit56: pass
class bit57: pass
class bit58: pass
class bit59: pass
class bit60: pass
class bit61: pass
class bit62: pass
class bit63: pass
class bit64: pass
class bit65: pass
class bit66: pass
class bit67: pass
class bit68: pass
class bit69: pass
class bit70: pass
class bit71: pass
class bit72: pass
class bit73: pass
class bit74: pass
class bit75: pass
class bit76: pass
class bit77: pass
class bit78: pass
class bit79: pass
class bit80: pass
class bit81: pass
class bit82: pass
class bit83: pass
class bit84: pass
class bit85: pass
class bit86: pass
class bit87: pass
class bit88: pass
class bit89: pass
class bit90: pass
class bit91: pass
class bit92: pass
class bit93: pass
class bit94: pass
class bit95: pass
class bit96: pass
class bit97: pass
class bit98: pass
class bit99: pass
class bit100: pass
class bit101: pass
class bit102: pass
class bit103: pass
class bit104: pass
class bit105: pass
class bit106: pass
class bit107: pass
class bit108: pass
class bit109: pass
class bit110: pass
class bit111: pass
class bit112: pass
class bit113: pass
class bit114: pass
class bit115: pass
class bit116: pass
class bit117: pass
class bit118: pass
class bit119: pass
class bit120: pass
class bit121: pass
class bit122: pass
class bit123: pass
class bit124: pass
class bit125: pass
class bit126: pass
class bit127: pass
class bit128: pass
class bit256: pass
class bit512: pass
class bit1024: pass
class int2: pass
class int3: pass
class int4: pass
class int5: pass
class int6: pass
class int7: pass
class int8: pass
class int9: pass
class int10: pass
class int11: pass
class int12: pass
class int13: pass
class int14: pass
class int15: pass
class int16: pass
class int17: pass
class int18: pass
class int19: pass
class int20: pass
class int21: pass
class int22: pass
class int23: pass
class int24: pass
class int25: pass
class int26: pass
class int27: pass
class int28: pass
class int29: pass
class int30: pass
class int31: pass
class int32: pass
class int33: pass
class int34: pass
class int35: pass
class int36: pass
class int37: pass
class int38: pass
class int39: pass
class int40: pass
class int41: pass
class int42: pass
class int43: pass
class int44: pass
class int45: pass
class int46: pass
class int47: pass
class int48: pass
class int49: pass
class int50: pass
class int51: pass
class int52: pass
class int53: pass
class int54: pass
class int55: pass
class int56: pass
class int57: pass
class int58: pass
class int59: pass
class int60: pass
class int61: pass
class int62: pass
class int63: pass
class int64: pass
class int65: pass
class int66: pass
class int67: pass
class int68: pass
class int69: pass
class int70: pass
class int71: pass
class int72: pass
class int73: pass
class int74: pass
class int75: pass
class int76: pass
class int77: pass
class int78: pass
class int79: pass
class int80: pass
class int81: pass
class int82: pass
class int83: pass
class int84: pass
class int85: pass
class int86: pass
class int87: pass
class int88: pass
class int89: pass
class int90: pass
class int91: pass
class int92: pass
class int93: pass
class int94: pass
class int95: pass
class int96: pass
class int97: pass
class int98: pass
class int99: pass
class int100: pass
class int101: pass
class int102: pass
class int103: pass
class int104: pass
class int105: pass
class int106: pass
class int107: pass
class int108: pass
class int109: pass
class int110: pass
class int111: pass
class int112: pass
class int113: pass
class int114: pass
class int115: pass
class int116: pass
class int117: pass
class int118: pass
class int119: pass
class int120: pass
class int121: pass
class int122: pass
class int123: pass
class int124: pass
class int125: pass
class int126: pass
class int127: pass
class int128: pass
class uint2: pass
class uint3: pass
class uint4: pass
class uint5: pass
class uint6: pass
class uint7: pass
class uint8: pass
class uint9: pass
class uint10: pass
class uint11: pass
class uint12: pass
class uint13: pass
class uint14: pass
class uint15: pass
class uint16: pass
class uint17: pass
class uint18: pass
class uint19: pass
class uint20: pass
class uint21: pass
class uint22: pass
class uint23: pass
class uint24: pass
class uint25: pass
class uint26: pass
class uint27: pass
class uint28: pass
class uint29: pass
class uint30: pass
class uint31: pass
class uint32: pass
class uint33: pass
class uint34: pass
class uint35: pass
class uint36: pass
class uint37: pass
class uint38: pass
class uint39: pass
class uint40: pass
class uint41: pass
class uint42: pass
class uint43: pass
class uint44: pass
class uint45: pass
class uint46: pass
class uint47: pass
class uint48: pass
class uint49: pass
class uint50: pass
class uint51: pass
class uint52: pass
class uint53: pass
class uint54: pass
class uint55: pass
class uint56: pass
class uint57: pass
class uint58: pass
class uint59: pass
class uint60: pass
class uint61: pass
class uint62: pass
class uint63: pass
class uint64: pass
class uint65: pass
class uint66: pass
class uint67: pass
class uint68: pass
class uint69: pass
class uint70: pass
class uint71: pass
class uint72: pass
class uint73: pass
class uint74: pass
class uint75: pass
class uint76: pass
class uint77: pass
class uint78: pass
class uint79: pass
class uint80: pass
class uint81: pass
class uint82: pass
class uint83: pass
class uint84: pass
class uint85: pass
class uint86: pass
class uint87: pass
class uint88: pass
class uint89: pass
class uint90: pass
class uint91: pass
class uint92: pass
class uint93: pass
class uint94: pass
class uint95: pass
class uint96: pass
class uint97: pass
class uint98: pass
class uint99: pass
class uint100: pass
class uint101: pass
class uint102: pass
class uint103: pass
class uint104: pass
class uint105: pass
class uint106: pass
class uint107: pass
class uint108: pass
class uint109: pass
class uint110: pass
class uint111: pass
class uint112: pass
class uint113: pass
class uint114: pass
class uint115: pass
class uint116: pass
class uint117: pass
class uint118: pass
class uint119: pass
class uint120: pass
class uint121: pass
class uint122: pass
class uint123: pass
class uint124: pass
class uint125: pass
class uint126: pass
class uint127: pass
class uint128: pass
class List: pass
class Tuple: pass
|
# pylint: disable=no-name-in-module
from typing import List
from tensorflow.keras.initializers import Constant
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers.experimental.preprocessing import \
TextVectorization
from tensorflow import data
from modern_talking.data.glove import get_glove_embedding_matrix
# Workaround as we cannot import directly like this:
# `from tensorflow.data import Dataset`
Dataset = data.Dataset
def text_vectorization_layer(
texts: List[str],
max_tokens: int = 100_000,
output_sequence_length: int = None,
) -> TextVectorization:
"""
    Create a text vectorization layer whose vocabulary is adapted
    to the most frequent tokens in the given texts.
"""
layer = TextVectorization(
max_tokens,
output_sequence_length=output_sequence_length,
)
text_dataset = Dataset.from_tensor_slices(texts)
layer.adapt(text_dataset)
return layer
def glove_embedding_layer(
vectorization_layer: TextVectorization
) -> Embedding:
"""
Create a GloVe word embedding layer
to be used after the vectorization layer.
Note that GloVe embeddings have to be downloaded first.
"""
vocabulary = vectorization_layer.get_vocabulary()
initial_matrix = get_glove_embedding_matrix(vocabulary)
dimension = initial_matrix.shape[1]
layer = Embedding(
len(vocabulary) + 2,
dimension,
embeddings_initializer=Constant(initial_matrix),
)
return layer
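
# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of how the two helpers above might be chained in a
# Keras model. The sample corpus and the classifier head are hypothetical, and
# it assumes the GloVe vectors needed by get_glove_embedding_matrix() have
# already been downloaded.
if __name__ == "__main__":
    import tensorflow as tf
    from tensorflow.keras.layers import Dense, GlobalAveragePooling1D

    sample_texts = ["the cat sat on the mat", "dogs chase cats"]  # hypothetical corpus
    vectorize = text_vectorization_layer(sample_texts, output_sequence_length=16)
    embed = glove_embedding_layer(vectorize)
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(1,), dtype=tf.string),
        vectorize,                 # strings -> token ids
        embed,                     # token ids -> GloVe vectors
        GlobalAveragePooling1D(),  # average over the sequence dimension
        Dense(1, activation="sigmoid"),
    ])
    model.summary()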
|
from datetime import datetime
import logging
import os
import traceback
import gcloud.logging
from google.protobuf.json_format import ParseDict
from google.protobuf.struct_pb2 import Struct
import gunicorn.glogging
from server.logging import REQUEST_LOG_VARIABLE
ACCESS_LOG_FORMAT = '%(message)s (%(pathname)s:%(lineno)d, in %(funcName)s)'
ERROR_LOG_FORMAT = '[gunicorn] %(message)s'
access_formatter = logging.Formatter(ACCESS_LOG_FORMAT)
error_formatter = logging.Formatter(ERROR_LOG_FORMAT)
def format_time(dt):
"""Formats a naive datetime as UTC time"""
return dt.isoformat() + 'Z'
class ProcessCloudLogger:
"""Call get_logger() to get a Google Cloud logger instance. Ensures that
each process has its own logger.
"""
def __init__(self):
self.logger_pid = None
self.logger = None
self.log_name = os.getenv('GOOGLE_LOG_NAME', 'ok-default')
def get_instance(self):
pid = os.getpid()
if self.logger_pid != pid:
self.logger_pid = pid
client = gcloud.logging.Client()
self.logger = client.logger(self.log_name)
return self.logger
def log_proto(self, *args, **kwargs):
try:
self.get_instance().log_proto(*args, **kwargs)
except Exception:
            traceback.print_exc()
def log_struct(self, *args, **kwargs):
try:
self.get_instance().log_struct(*args, **kwargs)
except Exception:
traceback.print_exc()
def log_text(self, *args, **kwargs):
try:
self.get_instance().log_text(*args, **kwargs)
except Exception:
traceback.print_exc()
class GoogleCloudHandler(logging.Handler):
def __init__(self, cloud_logger):
super().__init__()
self.cloud_logger = cloud_logger
def handle(self, record):
message = error_formatter.format(record)
self.cloud_logger.log_text(message, severity=record.levelname)
class Logger(gunicorn.glogging.Logger):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cloud_logger = ProcessCloudLogger()
self.error_log.addHandler(GoogleCloudHandler(self.cloud_logger))
def access(self, resp, req, environ, request_time):
super().access(resp, req, environ, request_time)
# Ignore health check
if environ['PATH_INFO'] == '/healthz':
return
# See gunicorn/glogging.py
status = resp.status
if isinstance(status, str):
status = status.split(None, 1)[0]
now = datetime.utcnow()
level = logging.NOTSET
message = {
'@type': 'type.googleapis.com/google.appengine.logging.v1.RequestLog',
'ip': environ.get('REMOTE_ADDR'),
'startTime': format_time(now - request_time),
'endTime': format_time(now),
'latency': '%d.%06ds' % (request_time.seconds, request_time.microseconds),
'method': environ['REQUEST_METHOD'],
'resource': environ['PATH_INFO'],
'httpVersion': environ['SERVER_PROTOCOL'],
'status': status,
'responseSize': getattr(resp, 'sent', None),
'userAgent': environ.get('HTTP_USER_AGENT'),
}
request_log = environ.get(REQUEST_LOG_VARIABLE)
if request_log:
message['urlMapEntry'] = request_log.endpoint
message['line'] = [
{
'time': format_time(datetime.utcfromtimestamp(record.created)),
'severity': record.levelname,
'logMessage': access_formatter.format(record),
# The log viewer only wants real App Engine files, so we
# can't put the actual file here.
'sourceLocation': None,
}
for record in request_log.lines
]
level = max(
(record.levelno for record in request_log.lines),
default=logging.NOTSET,
)
if level > logging.NOTSET:
severity = logging.getLevelName(level)
else:
severity = None
struct_pb = ParseDict(message, Struct())
self.cloud_logger.log_proto(struct_pb, severity=severity)
try:
client = gcloud.logging.Client()
except Exception:
Logger = gunicorn.glogging.Logger # noqa: F811
if os.getenv('APPINSIGHTS_INSTRUMENTATIONKEY'):
Logger = gunicorn.glogging.Logger # noqa: F811
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from astropy.stats import SigmaClip
from photutils import (
Background2D,
SExtractorBackground,
MMMBackground,
ModeEstimatorBackground,
MedianBackground,
MeanBackground
)
from astropy.io import fits
def bkg_estimation(filename, box=(20, 20), filter_size=(3, 3),
bkg_estimator='SExtractor', sigma=3.,
sigma_lower=None, sigma_upper=None,
maxiters=10, outLevel=1):
imagelist = np.atleast_1d(filename)
for ima in imagelist:
print("\nEstimate background in %s" % ima)
root = os.path.splitext(ima)[0]
hdulist = fits.open(ima)
data = hdulist[0].data
sigma_clip = SigmaClip(sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
maxiters=maxiters)
if bkg_estimator == 'SExtractor':
bkg_estimator = SExtractorBackground()
elif bkg_estimator == 'MMM':
bkg_estimator = MMMBackground()
elif bkg_estimator == 'ModeEstimator':
bkg_estimator = ModeEstimatorBackground()
elif bkg_estimator == 'Median':
bkg_estimator = MedianBackground()
elif bkg_estimator == 'Mean':
bkg_estimator = MeanBackground()
bkg = Background2D(
data,
box,
filter_size=filter_size,
sigma_clip=sigma_clip,
bkg_estimator=bkg_estimator,
)
        # Create image with the background subtracted
hdulist[0].data = data - bkg.background
hdulist.writeto(ima, overwrite=True)
if outLevel == 2:
# Create image with 2D background
bkg_file = root + "_bkg_map.fits"
hdulist[0].data = bkg.background
hdulist.writeto(bkg_file, overwrite=True)
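
# --- Usage sketch (added for illustration; not part of the original module) ---
# Example call on a hypothetical FITS image; "image.fits" is a placeholder
# filename. outLevel=2 additionally writes a <name>_bkg_map.fits file with the
# estimated 2D background next to the (background-subtracted) input image.
if __name__ == "__main__":
    bkg_estimation(
        "image.fits",            # placeholder input image
        box=(64, 64),            # background mesh size in pixels
        filter_size=(3, 3),      # median filter applied to the mesh
        bkg_estimator="SExtractor",
        sigma=3.0,
        outLevel=2,
    )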
|
import pandas as pd
import xml.etree.ElementTree as tree
def iter_pdv(root, stations=False):
for doc in root.iterfind('pdv'):
info = doc.attrib.copy()
info['adresse'] = doc.find('adresse').text
info['ville'] = doc.find('ville').text
if stations:
yield info
else:
for prix in doc.iterfind('prix'):
output = prix.attrib.copy()
output['id'] = info['id']
yield output
with open("/Users/xo/Downloads/PrixCarburants_annuel_2016.xml", 'r') as f:
etree = tree.fromstringlist(f.readlines())
stations = pd.DataFrame(list(iter_pdv(etree, True)))
stations = stations.set_index('id')
stations[['adresse', 'cp', 'ville', 'pop']].to_csv('stations_2016.csv')
print(stations.head())
prix = pd.DataFrame(list(iter_pdv(etree, False)))
prix.valeur = prix.valeur.astype(float) / 1000
print(prix.head())
prix.to_csv('prix_2016.csv', index=False)
|
import fnmatch
import pathlib
import posixpath
import re
from os import fsencode
from typing import List
from urllib.parse import quote_from_bytes, urlparse
from paaaaath.common import PurePath
class _UriFlavour(pathlib._Flavour): # type: ignore
sep = "/"
altsep = ""
has_drv = True
schemes: List[str] = []
pathmod = type("fakepath", (), {"normpath": lambda _, x: x})()
is_supported = True
def splitroot(self, part, sep=sep):
url = urlparse(part)
scheme = url.scheme
netloc = url.netloc
path = url.path
if scheme == "" and netloc != "":
path = f"{sep}{netloc}{sep}{path}".rstrip(sep)
netloc = ""
drv = "" if scheme == "" else f"{scheme}://{netloc}"
part = path.lstrip("/")
root = sep if drv != "" or path != part else ""
has_unknown_scheme = not any(drv.startswith(f"{s}://") for s in self.schemes)
if drv != "" and (0 < len(self.schemes) and has_unknown_scheme):
raise ValueError(f"http and https are only supported. but {drv} was given.")
return drv, root, part
def casefold(self, s):
return s
def casefold_parts(self, parts):
return parts
def compile_pattern(self, pattern):
return re.compile(fnmatch.translate(pattern)).fullmatch
def resolve(self, path, strict=False):
norm_path = posixpath.normpath(self.join(path.parts[1:]))
return f"{path.drive}{path.root}{norm_path}"
def is_reserved(self, parts):
return False
def make_uri(self, path):
bpath = bytes(fsencode(self.join(path.parts[1:])))
return f"{path.drive}{path.root}{quote_from_bytes(bpath)}"
def gethomedir(self, username):
raise NotImplementedError("gethomedir() not available on this system")
_uri_flavour = _UriFlavour()
@PurePath.register()
class PureUriPath(PurePath):
_flavour = _uri_flavour
__slots__ = ()
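
# --- Behaviour sketch (added for illustration; assumptions, not a test) ---
# Relies on the pre-3.12 pathlib._Flavour internals used above and on PurePath
# from paaaaath.common accepting URI strings. The expected values follow from
# splitroot(): the scheme and netloc become the drive, the rest splits on "/".
if __name__ == "__main__":
    p = PureUriPath("http://example.com/a/b")
    print(p.drive)  # expected: "http://example.com"
    print(p.root)   # expected: "/"
    print(p.name)   # expected: "b"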
|
import discord
import simplejson as json
import functions
from discord.ext import commands
class Mention(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
@commands.guild_only()
async def on_message(self, message: discord.Message):
"""Sends a message with the guild's bot prefix"""
if "purge" in message.content:
return
if self.client.user.mentioned_in(message):
lang = functions.getLang.getLang(message.guild.id)
with open("serverconfig/prefixes.json", "r") as f:
prefix = json.load(f)[str(message.guild.id)]
with open(f"embeds/{lang}/mentionPrefix.json", "r") as f:
msg = json.load(f)
await message.reply(msg.replace("%VAR", prefix), mention_author=False, delete_after=20)
def setup(client):
client.add_cog(Mention(client))
|
#!env/bin/python
"""
Run this file to create rst versions from doc-related notebooks.
All notebooks are executed and require a running elasticsearch at localhost:9200
"""
import tempfile
import subprocess
import time
import os
import shutil
import argparse
from elasticsearch import NotFoundError
from elastipy import connections
from docs.helper import remove_hidden_cells_in_file, fix_links_in_rst_file
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"-e", "--execute", type=bool, default=False, nargs="?", const=True,
help="Execute the notebooks before conversion. "
"This is required for proper documentations!"
" Only skip execution for development purposes."
)
return parser.parse_args()
EXECUTE_NOTEBOOKS = True
DOCS_DIR = "docs"
HIDDEN_CELLS = [
r"^# hidden.*",
r"^<AxesSubplot.*>$",
]
RUN_BUT_HIDDEN_CELLS = [
r"^# run-but-hidden",
r"^# run-but-hide"
]
def export_notebook(
filename: str,
format: str,
directory: str,
do_rename_lexer: bool = True
):
args = [
"jupyter", "nbconvert",
f"--to={format}", f"--output-dir={directory}",
f"--RegexRemovePreprocessor.patterns={repr(HIDDEN_CELLS)}",
]
if EXECUTE_NOTEBOOKS:
args += ["--execute"]
env = os.environ.copy()
env["PYTHONPATH"] = ".."
result = subprocess.call(
args + [filename],
env=env,
)
if result:
raise AssertionError(
f"Exporting notebook {filename} failed with exit-code {result}"
)
ext = {"markdown": "md"}.get(format, format)
out_filename = filename.split(os.path.sep)[-1].replace(".ipynb", f".{ext}")
if do_rename_lexer:
# rename the "ipython3" lexer so readthedocs does not need ipython installed
# It's safe because we do not use cell magic or other extended stuff
rename_lexer(
os.path.join(directory, out_filename),
"ipython3",
"python3"
)
remove_hidden_cells_in_file(
os.path.join(directory, out_filename),
HIDDEN_CELLS + RUN_BUT_HIDDEN_CELLS
)
if format == "rst":
fix_links_in_rst_file(
os.path.join(directory, out_filename)
)
def rename_lexer(filename, old, new):
with open(filename, "r") as fp:
text = fp.read()
replaced_text = text.replace(
f".. code:: {old}",
f".. code:: {new}",
)
if replaced_text != text:
print(f"renaming lexer {old} to {new} in {filename}")
with open(filename, "w") as fp:
fp.write(replaced_text)
def render_tutorial():
# delete the shapes index if it is present
try:
connections.get().indices.delete("elastipy-example-shapes")
except NotFoundError:
pass
# remove the previous files
try:
shutil.rmtree(f"{DOCS_DIR}/tutorial_files")
except FileNotFoundError:
pass
export_notebook("examples/tutorial.ipynb", "rst", DOCS_DIR)
def render_quickref():
"""
Renders the docs/quickref.ipynb notebook, converts to markdown
and inserts the stuff into the README.md
Also puts a .rst copy in the docs
"""
export_notebook("docs/quickref.ipynb", "rst", DOCS_DIR)
with open("docs/quickref.rst") as fp:
text = fp.read()
with open("docs/quickref.rst", "w") as fp:
fp.write("Overview\n========\n\n\n" + text)
with tempfile.TemporaryDirectory() as TEMP_DIR:
export_notebook("docs/quickref.ipynb", "markdown", TEMP_DIR)
with open(os.path.join(TEMP_DIR, "quickref.md")) as fp:
quickref = fp.read().strip() + "\n\n"
# put between these two lines in README.md
README_START = "### configuration"
README_END = "**More examples can be found [here](examples).**"
with open("README.md") as fp:
readme = fp.read()
index_start = readme.index(README_START)
index_end = readme.index(README_END)
old_readme = readme
readme = readme[:index_start] + quickref + readme[index_end:]
if readme != old_readme:
print(f"Putting quickref into README.md")
with open("README.md", "w") as fp:
fp.write(readme)
def render_gitlogs_example():
"""
Renders the examples/gitlogs.ipynb notebook
"""
export_notebook("examples/gitlogs.ipynb", "rst", os.path.join(DOCS_DIR, "examples"))
def render_plotting_maps_example():
"""
Renders the examples/plotting-maps.ipynb notebook
"""
export_notebook("examples/plotting-maps.ipynb", "rst", os.path.join(DOCS_DIR, "examples"))
def copy_plotlyjs():
"""
Copy the currently installed plotly.min.js to doc/static
:return:
"""
import plotly
code = plotly.offline.get_plotlyjs()
with open(os.path.join(DOCS_DIR, "static", "js", "plotly.min.js"), "w") as fp:
fp.write(code)
if __name__ == "__main__":
args = parse_arguments()
EXECUTE_NOTEBOOKS = args.execute
copy_plotlyjs()
render_quickref()
render_tutorial()
render_gitlogs_example()
render_plotting_maps_example()
|
from authlib.oauth2.rfc6749.resource_protector import *
|
import io
import os
import re
from pathlib import Path
from setuptools import find_packages
from setuptools import setup
# Package metadata
NAME = 'reddit'
DESCRIPTION = "A linear SVC model to classify Reddit posts"
URL = "https://github.com/yusueliu/reddit"
EMAIL = "sue.liu@gmail.com"
AUTHOR = "Sue Liu"
REQUIRES_PYTHON = '>=3.6.0'
# Packages that are required for this module to be executed
def list_reqs(fname='requirements.txt'):
with open(fname) as f:
return f.read().splitlines()
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
# Load the package's __version__.py module as a dictionary
ROOT_DIR = Path(__file__).resolve().parent
PACKAGE_DIR = ROOT_DIR / 'reddit'
README = (ROOT_DIR / "README.md").read_text()
about = {}
with open(PACKAGE_DIR / 'VERSION') as f:
_version = f.read().strip()
about['__version__'] = _version
setup(
name=NAME,
version=about['__version__'],
url=URL,
license='MIT',
author=AUTHOR,
author_email=EMAIL,
description=DESCRIPTION,
long_description=README,
long_description_content_type='text/markdown',
packages=find_packages(exclude=('tests', 'notebooks',)),
package_data={NAME: ['VERSION']},
install_requires=list_reqs(),
    extras_require={},
include_package_data=True,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
|
# Exercise 14
prefetchList = []
def Fibonacci(n):
    if n < len(prefetchList) and prefetchList[n] != 0:
return prefetchList[n]
else:
for i in range(n - len(prefetchList)):
prefetchList.append(0)
if n >= 3:
i = prefetchList[n - 2] = Fibonacci(n - 2)
j = prefetchList[n - 1] = Fibonacci(n - 1)
return i + j
else:
return 1
def main():
    print(Fibonacci(int(input("Please input an integer: >>>"))))
if __name__ == '__main__':
main()
|
from copy import deepcopy
from plingo import Plingo
def test_zero():
lingo = Plingo()
input = [
[[22, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
expected = [
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
lingo._next_image_data = deepcopy(input)
lingo._height = 3
lingo._width = 3
lingo.execute(show_progressbar=False, iterations=1, save_output=False)
assert lingo._next_image_data == expected
def test_simple():
lingo = Plingo()
input = [
[[22, 1, 1], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
expected = [
[[22, 1, 1], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
lingo._next_image_data = deepcopy(input)
lingo._height = 3
lingo._width = 3
lingo.execute(show_progressbar=False, iterations=1, save_output=False)
assert lingo._next_image_data == expected
input = [
[[22, 1, 1], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
expected = [
[[22, 0, 1], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
lingo._next_image_data = deepcopy(input)
lingo._height = 3
lingo._width = 3
lingo.execute(show_progressbar=False, iterations=1, save_output=False)
assert lingo._next_image_data == expected
input = [
[[22, 1, 1], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 1], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
expected = [
[[22, 1, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 1], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
lingo._next_image_data = deepcopy(input)
lingo._height = 3
lingo._width = 3
lingo.execute(show_progressbar=False, iterations=1, save_output=False)
assert lingo._next_image_data == expected
input = [
[[22, 4, 4], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 1], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
expected = [
[[22, 4, 3], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 1], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
lingo._next_image_data = deepcopy(input)
lingo._height = 3
lingo._width = 3
lingo.execute(show_progressbar=False, iterations=1, save_output=False)
assert lingo._next_image_data == expected
def test_saturation():
lingo = Plingo()
input = [
[[22, 1, 1], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 10, 10], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
expected = [
[[22, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 10, 10], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
]
lingo._next_image_data = deepcopy(input)
lingo._height = 3
lingo._width = 3
lingo.execute(show_progressbar=False, iterations=1, save_output=False)
assert lingo._next_image_data == expected
|
kaartnummers = open('kaartnummers.txt', 'r')
for regel in kaartnummers.readlines():
    regel = regel.rstrip()  # Removes the \n at the end of the line
line = regel.split(',')
    print('{1} has card number: {0}'.format(line[0], line[1]))
kaartnummers.close() |
import collections.abc
import math
from typing import Union, Iterable
# IUGG mean earth radius in kilometers, from
# https://en.wikipedia.org/wiki/Earth_radius#Mean_radius. Using a
# sphere with this radius results in an error of up to about 0.5%.
EARTH_RADIUS = 6371.009
class Coordinates:
def __init__(self, lat: Union[str, int, float], lng: Union[str, int, float]):
self._validate_lat(lat)
self._validate_lng(lng)
self.lat = float(lat)
self.lng = float(lng)
def __getitem__(self, item):
return tuple(self)[item]
def __iter__(self):
return iter((self.lat, self.lng))
def __eq__(self, other):
        if not isinstance(other, collections.abc.Iterable):
return NotImplemented
return tuple(self) == tuple(other)
@staticmethod
def _validate_lat(lat):
if not -90 <= float(lat) <= 90:
raise ValueError('Latitude cannot be less than -90 or greater than 90')
@staticmethod
def _validate_lng(lng):
if not -180 <= float(lng) <= 180:
raise ValueError('Longitude cannot be less than -180 or greater than 180')
def great_circle_distance(a: Union[Coordinates, Iterable], b: [Coordinates, Iterable]):
"""
    Calculate the great circle distance between two coordinates on Earth,
    based on the formula in this Wikipedia article:
https://en.wikipedia.org/wiki/Great-circle_distance
Returns:
float: great circle distance in kilometers between the two coordinates
"""
a, b = Coordinates(*a), Coordinates(*b)
lat1, lng1 = math.radians(a.lat), math.radians(a.lng)
lat2, lng2 = math.radians(b.lat), math.radians(b.lng)
sin_lat1, cos_lat1 = math.sin(lat1), math.cos(lat1)
sin_lat2, cos_lat2 = math.sin(lat2), math.cos(lat2)
delta_lng = lng2 - lng1
aux = (sin_lat1 * sin_lat2) + cos_lat1 * cos_lat2 * math.cos(delta_lng)
# To avoid arccos failing for precision errors
if aux > 1 and aux - 1 <= 0.000000001:
aux = 1
central_angle = math.acos(aux)
return EARTH_RADIUS * central_angle
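
# --- Usage sketch (added for illustration) ---
# Coordinates are (lat, lng) pairs; either a Coordinates instance or any
# two-element iterable is accepted. The figures below are approximate
# city-centre coordinates for Paris and London.
if __name__ == "__main__":
    paris = Coordinates(48.8566, 2.3522)
    london = (51.5074, -0.1278)
    print(round(great_circle_distance(paris, london), 1), "km")  # roughly 343 km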
|
"""
Created on May 20, 2019
@author: A. Mehrafsa
"""
# main libraries
import numpy as np # performing operations on CPU
import torch # performing operations on GPU
class GraphUtils(object):
def __init__(self, library="numpy"):
self.version = 1.1
self.multi_arange = None
self.multi_arange_torch = None
self.join = None
self.unique = None
if library == "numpy":
self.multi_arange = GraphUtils.multi_arange_numpy
self.join = GraphUtils.join_numpy
self.unique = GraphUtils.unique_numpy_bin_count
elif library == "torch-cpu":
self.multi_arange_torch = GraphUtils.multi_arange_torch_cpu
self.multi_arange = self.multi_arange_torch
self.join = self.join_torch
self.unique = GraphUtils.unique_torch_bin_count
elif library == "torch-gpu" or library == "torch-cuda":
self.multi_arange_torch = GraphUtils.multi_arange_torch_gpu
self.multi_arange = self.multi_arange_torch
self.join = self.join_torch
self.unique = GraphUtils.unique_torch_bin_count
else:
raise RuntimeError("Unknown library passed to GraphUtils module!")
@staticmethod
def join_numpy(src, dst, deg, nei):
"""
This will return all two-hop passes from src to des vectors
:param src: source nodes vector in edge list
:param dst: destination nodes vector in edge list
:param deg: degree of nodes
:param nei: Index array for where each block of neighbors start
:return: list of (st, dn) pairs connected by any intermediate nodes
"""
st = np.repeat(src, deg[dst]) # source nodes repeat
m = GraphUtils.multi_arange_numpy(nei[dst], deg[dst])
dn = dst[m] # destination neighbors
return st, dn
def join_torch(self, src, dst, deg, nei):
st = torch.repeat_interleave(src, deg[dst]) # source nodes repeat
m = self.multi_arange_torch(nei[dst], deg[dst])
dn = dst[m] # destination neighbors
return st, dn
@staticmethod
def multi_arange_nei(start, count):
nnz = np.nonzero(count)
start = start[nnz]
count = count[nnz]
arr_len = np.sum(count)
# building reset indices
nei = np.zeros(count.shape[0]+1, dtype=count.dtype)
nei[1:] = np.cumsum(count)
ri = (nei[:-1]).copy()
# building incremental indices
        # This vector needs a long (int64) dtype to handle negative numbers properly
incr = np.ones(arr_len.item(), dtype=np.long)
incr[ri] = start
# correcting start indices for initial values
# np.add.at(incr, ri[1:], 1 - (start[:-1] + count[:-1]))
incr[ri[1:]] += 1 - (start[:-1] + count[:-1]).astype(np.long)
# typecast to normal data type
return np.cumsum(incr).astype(start.dtype), nei
@staticmethod
def multi_arange_numpy(start, count):
nnz = np.nonzero(count)
start = start[nnz]
count = count[nnz]
arr_len = np.sum(count)
# building reset indices
ri = np.zeros(count.shape[0], dtype=count.dtype)
ri[1:] = np.cumsum(count)[:-1]
# building incremental indices
        # This vector needs a long (int64) dtype to handle negative numbers properly
incr = np.ones(arr_len.item(), dtype=np.long)
incr[ri] = start
# correcting start indices for initial values
# np.add.at(incr, ri[1:], 1 - (start[:-1] + count[:-1]))
incr[ri[1:]] += 1 - (start[:-1] + count[:-1]).astype(np.long)
# typecast to normal data type
return np.cumsum(incr).astype(start.dtype)
@staticmethod
def multi_arange_torch_ex(start, count, device):
nnz = torch.nonzero(count).reshape(-1)
start = start[nnz]
count = count[nnz]
arr_len = torch.sum(count)
# building reset indices
ri = torch.zeros(count.shape[0], dtype=count.dtype, device=device, requires_grad=False)
ri[1:] = torch.cumsum(count, dim=0)[:-1]
# building incremental indices
        # This vector needs a long (int64) dtype to handle negative numbers properly
incr = torch.ones(arr_len.item(), dtype=torch.int64, device=device, requires_grad=False)
incr[ri] = start
# correcting start indices for initial values
# torch.add.at(incr, ri[1:], 1 - (start[:-1] + count[:-1]))
incr[ri[1:]] += 1 - (start[:-1] + count[:-1]).long()
return torch.cumsum(incr, dim=0).type(start.dtype)
@staticmethod
def multi_arange_torch_cpu(start, count):
return GraphUtils.multi_arange_torch_ex(start, count, 'cpu')
@staticmethod
def multi_arange_torch_gpu(start, count):
return GraphUtils.multi_arange_torch_ex(start, count, 'cuda')
@staticmethod
def unique_numpy_bin_count(vec):
q = np.bincount(vec).astype(vec.dtype)
u = np.nonzero(q)[0].astype(vec.dtype) # nonzero return tuple of arrays
return u, q[u]
@staticmethod
def unique_torch_bin_count(vec):
q = torch.bincount(vec)
u = torch.nonzero(q).reshape(-1) # return 2d array
return u, q[u]
@staticmethod
def cantor_pairing(s, d):
"""
Cantor pairing function:
$\mathbb{N}\times\mathbb{N}\rightarrow\mathbb{N}$
$\pi(a,b) = \frac{1}{2}(a+b)(a+b+1) + b$
:param s: first vector
:param d: second vector
:return: pairing values
"""
return (1 / 2 * (s + d) * (s + d + 1)).astype(s.dtype) + d
@staticmethod
def associative_pairing(s, d):
"""
        Produces a unique number given two interchangeable integers (associative pairing)
$f(a,b) = \dfrac{\max(a,b)(\max(a,b)+1)}{2}+\min(a,b)$
This satisfies $f(a,b)=f(b,a)$ and grows quadratically with $max(a,b)$.
Growth pattern:
* $f(0,0) = 0$
* $f(1,0) = 1, f(1,1) = 2$
* $f(2,0) = 3, f(2,1) = 4, f(2,2) = 5$
* $f(3,0) = 6, f(3,1) = 7, f(3,2) = 8, f(3,3) = 9$
:param s: first vector
:param d: second vector
:return: pairing values
"""
m = np.maximum(s, d)
n = np.minimum(s, d)
return (m * (m + 1) / 2).astype(m.dtype) + n
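
# --- Usage sketch (added for illustration; illustrative arrays only) ---
# multi_arange expands several (start, count) pairs into one concatenated range:
# starts [2, 7] with counts [3, 2] give [2, 3, 4, 7, 8]. unique returns the
# distinct values of a non-negative integer vector together with their counts.
if __name__ == "__main__":
    gu = GraphUtils(library="numpy")
    print(gu.multi_arange(np.array([2, 7]), np.array([3, 2])))  # [2 3 4 7 8]
    values, counts = gu.unique(np.array([1, 1, 3, 5, 5, 5]))
    print(values, counts)  # [1 3 5] [2 1 3]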
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for Graphy and Google Chart API backend."""
import string
import unittest
from graphy import graphy_test
from graphy.backends.google_chart_api import util
class SimpleEncoderTest(graphy_test.GraphyTest):
def setUp(self):
self.simple = util.SimpleDataEncoder()
def testEmpty(self):
self.assertEqual('', self.simple.Encode([]))
def testSingle(self):
self.assertEqual('A', self.simple.Encode([0]))
def testFull(self):
full = string.ascii_uppercase + string.ascii_lowercase + string.digits
self.assertEqual(full, self.simple.Encode(range(0, 62)))
def testRoundingError(self):
"""Scaling might give us some rounding error. Make sure that the encoder
deals with it properly.
"""
a = [-1, 0, 0, 1, 60, 61, 61, 62]
b = [-0.999999, -0.00001, 0.00001, 0.99998,
60.00001, 60.99999, 61.00001, 61.99998]
self.assertEqual(self.simple.Encode(a), self.simple.Encode(b))
def testFloats(self):
ints = [1, 2, 3, 4]
floats = [1.1, 2.1, 3.1, 4.1]
self.assertEqual(self.simple.Encode(ints), self.simple.Encode(floats))
def testOutOfRangeDropped(self):
"""Confirm that values outside of min/max are left blank."""
nums = [-79, -1, 0, 1, 61, 62, 1012]
self.assertEqual('__AB9__', self.simple.Encode(nums))
def testNoneDropped(self):
"""Confirm that the value None is left blank."""
self.assertEqual('_JI_H', self.simple.Encode([None, 9, 8, None, 7]))
class EnhancedEncoderTest(graphy_test.GraphyTest):
def setUp(self):
self.encoder = util.EnhancedDataEncoder()
def testEmpty(self):
self.assertEqual('', self.encoder.Encode([]))
def testFull(self):
full = ''.join(self.encoder.code)
self.assertEqual(full, self.encoder.Encode(range(0, 4096)))
def testOutOfRangeDropped(self):
nums = [-79, -1, 0, 1, 61, 4096, 10012]
self.assertEqual('____AAABA9____', self.encoder.Encode(nums))
def testNoneDropped(self):
self.assertEqual('__AJAI__AH', self.encoder.Encode([None, 9, 8, None, 7]))
class ScaleTest(graphy_test.GraphyTest):
"""Test scaling."""
def testScaleIntegerData(self):
scale = util.ScaleData
# Identity
self.assertEqual([1, 2, 3], scale([1, 2, 3], 1, 3, 1, 3))
self.assertEqual([-1, 0, 1], scale([-1, 0, 1], -1, 1, -1, 1))
# Translate
self.assertEqual([4, 5, 6], scale([1, 2, 3], 1, 3, 4, 6))
self.assertEqual([-3, -2, -1], scale([1, 2, 3], 1, 3, -3, -1))
# Scale
self.assertEqual([1, 3.5, 6], scale([1, 2, 3], 1, 3, 1, 6))
self.assertEqual([-6, 0, 6], scale([1, 2, 3], 1, 3, -6, 6))
# Scale and Translate
self.assertEqual([100, 200, 300], scale([1, 2, 3], 1, 3, 100, 300))
def testScaleDataWithDifferentMinMax(self):
scale = util.ScaleData
self.assertEqual([1.5, 2, 2.5], scale([1, 2, 3], 0, 4, 1, 3))
self.assertEqual([-2, 2, 6], scale([0, 2, 4], 1, 3, 0, 4))
def testScaleFloatingPointData(self):
scale = util.ScaleData
data = [-3.14, -2.72, 0, 2.72, 3.14]
scaled_e = 5 + 5 * 2.72 / 3.14
expected_data = [0, 10 - scaled_e, 5, scaled_e, 10]
actual_data = scale(data, -3.14, 3.14, 0, 10)
for expected, actual in zip(expected_data, actual_data):
self.assertAlmostEqual(expected, actual)
def testScaleDataOverRealRange(self):
scale = util.ScaleData
self.assertEqual([0, 30.5, 61], scale([1, 2, 3], 1, 3, 0, 61))
def testScalingLotsOfData(self):
data = range(0, 100)
expected = range(-100, 100, 2)
actual = util.ScaleData(data, 0, 100, -100, 100)
self.assertEqual(expected, actual)
class NameTest(graphy_test.GraphyTest):
"""Test long/short parameter names."""
def testLongNames(self):
params = dict(size='S', data='D', chg='G')
params = util.ShortenParameterNames(params)
self.assertEqual(dict(chs='S', chd='D', chg='G'), params)
def testCantUseBothLongAndShortName(self):
"""Make sure we don't let the user specify both the long and the short
version of a parameter. (If we did, which one would we pick?)
"""
params = dict(size='long', chs='short')
self.assertRaises(KeyError, util.ShortenParameterNames, params)
if __name__ == '__main__':
unittest.main()
|
import pandas as pd
import os
from decimal import Decimal
# from sklearn.cluster import KMeans,DBSCAN
import datetime,json,time,random
from math import radians, cos, sin, asin, sqrt,degrees
from impala.dbapi import connect
from sqlalchemy import create_engine
import MySQLdb
from collections import OrderedDict
from retrying import retry
import redis
from core.conf import config
from core.redis_helper import Logger_Redis,RedisHelper
pd.set_option('display.width', 400)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('display.max_columns', 70)
# def init_model(epsilon,min_samples,K=1,n_jobs=1):
# from sklearn.cluster import KMeans,DBSCAN
# kmeans_model=KMeans(n_clusters=K, random_state=0,n_jobs=n_jobs)
# dbscan_model = DBSCAN(eps=epsilon, min_samples=min_samples, n_jobs=n_jobs)
# return (kmeans_model, dbscan_model)
class Busycell_calc(object):
def __init__(self,channel='test',file_path='test.xlsx',task_id=None,file_path_id=None,radius=300,min_samples=200,K=1,n_jobs=1):
self.i = 0
self.j = 0
self.count = 0
self.radius = radius
self.min_samples = min_samples
self.K = K
self.n_jobs = n_jobs
self.key = channel
self.task_id = task_id
self.file_path = file_path
self.redis_helper=RedisHelper(self.key)
self.conn=connect(host='133.21.254.163', port=21050,database='hub_yuan', timeout=30)
self.r = redis.Redis(host=config.host2, port=config.port2, db=config.db6)
self.engine = create_engine('mysql+mysqldb://root:password@10.39.211.198:3306/busycell?charset=utf8')
self.db = MySQLdb.connect("10.39.211.198", "root", "password", "busycell", charset='utf8')
self.kmeans_model = self.init_model[0]
self.dbscan_model = self.init_model[1]
# self.df1=self.get_busy_df
self.df2=pd.read_csv('基础信息表.csv',encoding='gbk')
self.df3=pd.read_csv('负荷表.csv',encoding='gbk',usecols=['enbid','cellid','pdcp_up_flow','pdcp_down_flow','prb_percent'])
self.df_info=self.get_df_info
# self.df_busy_info=self.get_df_busy_info
def get_busy_df(self):
start = datetime.datetime.now()
# self.creatTable('超忙小区.xlsx')
self.redis_helper.public('<b>【任务开始】 taskID:</b>{0}'.format(self.task_id))
self.redis_helper.public('正在查询impala表。。。')
df1=pd.read_excel(self.file_path, usecols=['province', 'city', 'enbid', 'cellid'])
data = df1.apply(lambda x: '%d_%d' % (x['enbid'], x['cellid']), axis=1).values.tolist()
data_str = str(data)
sql1 = '''select enbid,cellid,group_concat(cast(longitude as string),',') as lng_set,group_concat(cast(latitude as string),',') as lat_set
from (
select cast(enb_id as int) as enbid, cast(cell_no as int) as cellid,longitude, latitude, city_name as city
from lte_hd.clt_mr_all_mro_l
where concat(cast(enb_id as string),'_',cell_no) in (%s) and
year=2018 and month=8 and day=1 and hour=11 and longitude is not null and latitude is not null
) t
GROUP BY enbid,cellid''' % (data_str[1:-1])
df2 = pd.read_sql(sql1, self.conn)
df = pd.merge(df1, df2, how='left', on=['enbid', 'cellid'])[['city', 'enbid', 'cellid','lng_set','lat_set']]
self.count = df.shape[0]
self.redis_helper.public('impala表查询完成!')
end = datetime.datetime.now()
self.redis_helper.public('impala表查询耗时 %ss' % (end - start).seconds)
return df
# sql = '''select city,enbid,cellid,group_concat(cast(longitude as string),',') as lng_set,group_concat(cast(latitude as string),',') as lat_set
# from (
# select start_time,enbid, cellid,longitude, latitude, city
# from lte_hd.clt_mr_all_mro_l m
# right join hub_yuan.lte_busy_cell n
# on m.enb_id=n.enbid and m.cell_no=cast(n.cellid as string) and m.year=2018 and m.month=8 and m.day=1 and m.hour=11
# ) t
# GROUP BY city,enbid,cellid'''
# df5 = pd.read_sql(sql, self.conn)
# # df5=df5[(df5['enbid']==602668) & (df5['cellid']==53)]
# self.count = df5.shape[0]
# print(self.count)
# self.redis_helper.public('impala表查询完成!')
# end = datetime.datetime.now()
# self.redis_helper.public('impala表查询耗时 %ss' % (end - start).seconds)
# return df5
@property
def init_model(self):
from sklearn.cluster import KMeans, DBSCAN
kmeans_model = KMeans(n_clusters=self.K, random_state=0, n_jobs=self.n_jobs)
dbscan_model = DBSCAN(eps=self.radius/100000, min_samples=self.min_samples, n_jobs=self.n_jobs)
return (kmeans_model, dbscan_model)
@property
def get_df_info(self):
return pd.merge(self.df2, self.df3, how='left', on=['enbid', 'cellid'])
def get_df_busy_info(self):
df1=self.get_busy_df()
return pd.merge(df1,self.df2,how='left',on=['enbid','cellid'])
# @retry(stop_max_delay=1000*5)
# def creatTable(self,path_busycell):
# start = datetime.datetime.now()
# # print(path_busycell,1)
# # df = pd.read_excel(path_busycell)
# # print(df.columns)
# df = pd.read_excel(path_busycell, usecols=['province', 'city', 'enbid', 'cellid'])
# self.count=df.shape[0]
# data = [tuple(d) for d in df.values.tolist()]
# print(str(data)[1:-1])
# sql_create = '''
# create table IF NOT EXISTS lte_busy_cell
# (
# province STRING,
# city STRING,
# enbid INT,
# cellid INT
# )
# ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
# '''
# sql_insert = 'insert overwrite lte_busy_cell values {}'.format(str(data)[1:-1])
# # conn = connect(host='133.21.254.164', port=21050, database='hub_yuan')
# cursor = self.conn.cursor()
# cursor.execute(sql_create)
# cursor.execute(sql_insert)
# end = datetime.datetime.now()
# self.redis_helper.public('创建impala表完成,耗时 %s'%(end-start).seconds)
# print('创建表耗时:', end - start)
def parse_distance(self,row):
self.i+=1
# if self.i%100==0:
self.redis_helper.public('开始搜索附近基站 %s个,完成 <b>%d%%</b>' %(self.i,self.i/self.count*100))
# r = redis.Redis(host=config.host2, port=config.port2, db=config.db6)
self.r.hset(self.task_id, "stage1PercentComplete", '%d'%(self.i/self.count*100))
# r.set(self.task_id,(self.i/self.count*100))
# time.sleep(random.random()/100)
df_info = self.df_info
enbid1, cellid1, freqID1 = row.enbid, row.cellid, row.freqID
x1, y1 = row.lng, row.lat
# print(x1,y1)
d = 0.3
r = 6371.393
        dlng = 2 * asin(sin(d / (2 * r)) / cos(radians(y1)))
        dlng = degrees(dlng)  # convert radians to degrees
dlat = d / r
dlat = degrees(dlat)
minlng = x1 - dlng
maxlng = x1 + dlng
minlat = y1 - dlat
maxlat = y1 + dlat
res = OrderedDict()
res['是否高负荷'] = False
res['同站点是否可扩载频'] = True
res_df = df_info[
(df_info.lng > minlng) & (df_info.lng < maxlng) & (df_info.lat > minlat) & (df_info.lat < maxlat) & (
enbid1 != df_info.enbid)]
# res_df = res_df.where(res_df.notnull(), None)
data_rectangle = res_df.to_dict(orient='records')
# print(res_df)
# res_df=res_df.round({'lng':6,'lat':6})
# data_rectangle = res_df.iterrows()
data_cricle = []
for r in data_rectangle:
# if isinstance(r['lng'], float) and isinstance(r['lng'], float):
r['lng'] = float(Decimal(r['lng']).quantize(Decimal('0.000000')))
r['lat'] = float(Decimal(r['lat']).quantize(Decimal('0.000000')))
x2, y2 = r['lng'], r['lat']
# print(x2,y2)
pdcp_up_flow = r['pdcp_up_flow']
pdcp_down_flow = r['pdcp_down_flow']
prb_percent = r['prb_percent']
            freqID2 = r['freqID']  # carrier frequency of the neighbour cell
distance = round(geodistance(x1, y1, x2, y2),2)
if distance < d * 1000:
r['距离'] = '%.1f米'%distance
# r = r.where(r.notnull(), None).to_dict()
# r=r.to_dict()
# print(r)
data_cricle.append(r)
if pdcp_up_flow > 20 and pdcp_down_flow > 80 and prb_percent > 7:
r['是否高负荷'] = True
res['是否高负荷'] = True
if distance == 0 and freqID2 == freqID1:
r['同站点是否可扩载频'] = False
res['同站点是否可扩载频'] = False
else:
r['同站点是否可扩载频'] = True
# res['同站点是否可扩载频'] = True
else:
r['是否高负荷'] = False
res['data'] = data_cricle
return res
# return json.dumps(res, ensure_ascii=False)
def parse_stage2(self,row):
dbscan_model = self.dbscan_model
kmeans_model = self.kmeans_model
self.j += 1
self.redis_helper.public('进行聚类分析,完成<b>%d%%</b>' % (self.j / self.count * 100))
self.r.hset(self.task_id, "stage2PercentComplete", '%d'%(self.j / self.count * 100))
# self.redis_helper.public('开始聚类分析第%s个' % self.j)
# model = self.model
res = OrderedDict()
res['是否高负荷'] = False
res['同站点是否可扩载频'] = True
res['是否室分站'] = True
lng_set = row['lng_set']
lat_set = row['lat_set']
# print('lng_set:',type(lng_set),lng_set)
# data = json.loads(row['是否邻小区'])
data=row['是否邻小区']
indoor = row['indoor']
isBusy = data['是否高负荷']
isFreq = data['同站点是否可扩载频']
freq = row['freqID']
scene = row['scene']
# data = pd.DataFrame({'lng': row['lng_set'].split(','), 'lat': row['lat_set'].split(',')})
# print(data)
# model.fit(data)
# centroid = model.cluster_centers_
# print(centroid)
# centre_point = centroid.tolist()[0]
# # print(centre_point)
# res['centre'] = centre_point
if isBusy:
res['是否高负荷'] = True
if not isFreq:
res['同站点是否可扩载频'] = False
if indoor == '否':
res['是否室分站'] = False
if not freq == 5:
if isinstance(lng_set,str) and isinstance(lat_set,str):
data = pd.DataFrame({'lng': row['lng_set'].split(','), 'lat': row['lat_set'].split(',')},columns=['lng','lat'])
# print("\033[0;31m%s\033[0m" % "输出红色字符")
# print(data)
                # default parameters: epsilon=0.001, min_samples=200
radius = 300
epsilon = radius / 100000
# epsilon = 0.003
min_samples = 100
y_pred = dbscan_model.fit_predict(data)
n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0) # 获取分簇的数目
if n_clusters_ < 1:
kmeans_model.fit(data)
centroid = kmeans_model.cluster_centers_
centres_cluster=centroid.tolist()[0]
else:
centres_cluster = []
for i in range(n_clusters_):
print('簇 ', i, '的所有样本:')
one_cluster = data[y_pred == i]
kmeans_model.fit(one_cluster)
centroid = kmeans_model.cluster_centers_
centres_cluster.append(centroid.tolist()[0])
res['centre'] = centres_cluster
return '建站点为:'+ str(centres_cluster)
# model.fit(data)
# centroid = model.cluster_centers_
# print(centroid)
# centre_point = centroid.tolist()[0]
# # print(centre_point)
# res['centre'] = centre_point
# return '非800M站点'
else:
if scene == '市区':
return '优先考虑优化手段'
else:
if isinstance(lng_set,str) and isinstance(lat_set,str):
data = pd.DataFrame({'lng': row['lng_set'].split(','), 'lat': row['lat_set'].split(',')},columns=['lng','lat'])
# print(data)
# default parameters: epsilon=0.001, min_samples=200
radius = 300
epsilon = radius / 100000
# epsilon = 0.003
min_samples = 100
y_pred = dbscan_model.fit_predict(data)
n_clusters_ = len(set(y_pred)) - (1 if -1 in y_pred else 0)  # number of clusters (noise label -1 excluded)
if n_clusters_ < 1:
kmeans_model.fit(data)
centroid = kmeans_model.cluster_centers_
centres_cluster = centroid.tolist()[0]
return '新增L800M小区,建站点为:' + str(centres_cluster)
else:
centres_cluster = []
for i in range(n_clusters_):
print('簇 ', i, '的所有样本:')
one_cluster = data[y_pred == i]
kmeans_model.fit(one_cluster)
centroid = kmeans_model.cluster_centers_
centres_cluster.append(centroid.tolist()[0])
return '用1.8G或者2.1G吸收,建站点为:'+ str(centres_cluster)
# model.fit(data)
# centroid = model.cluster_centers_
# centre_point = centroid.tolist()[0]
# # print(centre_point)
# res['密集点'] = centre_point
# return '800M站点'
else:
return '新增室分系统或采用有源天线系统'
else:
return '原站点扩载频'
else:
return '优化方法负载均衡'
# return json.dumps(res, ensure_ascii=False)
# return res
def run(self):
df_busy_info = self.get_df_busy_info()
engine = self.engine
self.redis_helper.public('stage1:邻区搜索')
df_busy_info['是否邻小区'] = df_busy_info.apply(self.parse_distance, axis=1)
self.redis_helper.public('stage2:聚类分析')
df_busy_info['result'] = df_busy_info.apply(self.parse_stage2, axis=1)
df_busy_info['n_cell'] = df_busy_info.apply(lambda x:x['是否邻小区']['data'], axis=1)
df_busy_info.drop(['是否邻小区'], axis=1, inplace=True)
df_busy_info.drop(['lng_set', 'lat_set'], axis=1, inplace=True)
now = datetime.datetime.now()
df_busy_info['finish_time'] = now
df_busy_info_copy = df_busy_info.copy()
# df_busy_info.to_excel('result.xlsx')
self.redis_helper.public('正在导入mysql..')
print('导入mysql..')
# df_busy_info['是否邻小区']=df_busy_info.apply(lambda x:json.dumps(x['是否邻小区'],ensure_ascii=False),axis=1)
# df_busy_info['结果'] = df_busy_info.apply(lambda x: json.dumps(x['结果'], ensure_ascii=False),axis=1)
# df_busy_info['是否邻小区']=df_busy_info['是否邻小区'].map(lambda x: json.dumps(x, ensure_ascii=False))
# save results to local files
timestr = now.strftime("%Y%m%d%H%M%S")
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
download_dir = os.path.join(BASE_DIR, 'download')
filename = str(os.path.split(self.file_path)[1].split('.')[0]) +'_'+ timestr
# filename = str(os.path.basename(self.file_path).split('.')[0]) +'_'+ timestr
# download_url = download_dir + filename
download_url=os.path.join(download_dir,filename)
print(download_dir)
if not os.path.exists(download_dir):
os.makedirs(download_dir)
df_busy_info_copy.to_excel(download_url+ '.xlsx',index=None)
df_busy_info_copy.to_csv(download_url+ '.csv',index=None)
print('已保存到本地excel')
# df_busy_info_copy['是否邻小区'] = df_busy_info_copy['是否邻小区'].map(lambda x: json.dumps(x, ensure_ascii=False))
df_busy_info['result']=df_busy_info['result'].map(lambda x: json.dumps(x, ensure_ascii=False))
df_busy_info['n_cell'] = df_busy_info['n_cell'].map(lambda x: json.dumps(x, ensure_ascii=False))
# df_busy_info['lng_set'] = df_busy_info['lng_set'].map(lambda x: json.dumps(x, ensure_ascii=False))
# df_busy_info['lat_set'] = df_busy_info['lat_set'].map(lambda x: json.dumps(x, ensure_ascii=False))
df_busy_info.to_sql('busycell', con=engine, if_exists='append')
df_btsinfo=df_busy_info[['city','enbid','cellid','cellname','freqID','scene','indoor']]
df_btsinfo.drop_duplicates(inplace=True)
df_btsinfo = df_btsinfo.where(df_btsinfo.notnull(), None)
rows=df_btsinfo.values.tolist()
cursor = self.db.cursor()
sql = "insert ignore into btsinfo (city,enbid,cellid,cellname,freqID,scene,indoor) VALUES(%s,%s,%s,%s,%s,%s,%s)"
try:
cursor.executemany(sql, rows)
except Exception as e:
self.db.rollback()
print("执行MySQL: %s 时出错:%s" % (sql, e))
self.db.commit()
cursor.close()
self.db.close()
self.redis_helper.public('导入mysql成功!')
self.redis_helper.public('end')
return df_busy_info,download_url
def geodistance(lng1,lat1,lng2,lat2):
lng1, lat1, lng2, lat2 = map(radians, [lng1, lat1, lng2, lat2])
dlon=lng2-lng1
dlat=lat2-lat1
a=sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
r = 6371.393
dis=2*asin(sqrt(a))*r*1000.0
return dis
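# Illustrative check (coordinates are assumptions, result is approximate): two points
# 0.01 degrees of longitude apart at latitude 39.9 are roughly 850 m apart.
#   geodistance(116.40, 39.90, 116.41, 39.90)  # ~853 m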
# busycell=Busycell_calc()
# def run():
# df_busy_info=busycell.df_busy_info
# engine=busycell.engine
# df_busy_info['是否邻小区'] = df_busy_info.apply(parse_distance, axis=1)
# print(df_busy_info)
# df_busy_info['结果'] = df_busy_info.apply(parse_stage2, axis=1)
# df_busy_info.drop(['lng_set', 'lat_set'], axis=1, inplace=True)
# now = datetime.datetime.now()
# df_busy_info['日期'] = now
# df_busy_info.to_excel('result.xlsx')
# print('导入mysql..')
# df_busy_info.to_sql('busycell', con=engine, if_exists='append')
if __name__=='__main__':
start=datetime.datetime.now()
busycell = Busycell_calc()
busycell.run()
end=datetime.datetime.now()
print('总共耗时:',end-start)
|
#!/usr/bin/python
#
# Checks files to make sure they conform to LLVM37 standards which can be applied
# to any programming language: at present, line length and trailing whitespace.
import common_lint
import sys
class GenericCodeLint(common_lint.BaseLint):
MAX_LINE_LENGTH = 80
def RunOnFile(self, filename, lines):
common_lint.VerifyLineLength(filename, lines,
GenericCodeLint.MAX_LINE_LENGTH)
common_lint.VerifyTrailingWhitespace(filename, lines)
def GenericCodeLintMain(filenames):
common_lint.RunLintOverAllFiles(GenericCodeLint(), filenames)
return 0
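# Example invocation (file names are assumptions):
#   python generic_lint.py lib/Foo.cpp include/Foo.h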
if __name__ == '__main__':
sys.exit(GenericCodeLintMain(sys.argv[1:]))
|
import pytest
from tests.integration.helpers import (
run_command_and_detect_errors,
)
@pytest.mark.parametrize(
'command',
(
# ropsten
('trinity', '--ropsten',),
)
)
@pytest.mark.asyncio
async def test_does_not_throw_errors_on_long_run(async_process_runner, command):
# Ensure that no errors are thrown when trinity is run for 90 seconds
await run_command_and_detect_errors(async_process_runner, command, 90)
|
"""Matrix spiral traversal library.
Get a square matrix from a remote server and return the result of spiral
traversal of the matrix: counterclockwise, starting from the upper left corner
"""
import logging
from typing import List
from aiohttp import ClientError, ClientSession
from mattrav.exceptions import FormatMatrixExceptions, GetMatrixException
LOGGER = logging.getLogger(__name__)
async def get_matrix(url: str, raise_on_error: bool = False) -> List[int]:
"""Get result of traversal of the matrix downloaded from a remote server.
Args:
url: remote server url
raise_on_error: flag of exceptions raise of network access
Returns:
result of the matrix traversal
Raises:
GetMatrixException: exception of network access
"""
try: # noqa: WPS229
graphical_matrix = await download_matrix(url)
matrix = parse_matrix(graphical_matrix)
except ClientError as exc:
if raise_on_error:
raise GetMatrixException("Download can't be finished") from exc
LOGGER.warning(exc)
return []
except FormatMatrixExceptions as exc:
if raise_on_error:
raise
LOGGER.warning(exc)
return []
return traverse_matrix(matrix)
async def download_matrix(url: str) -> str:
"""Download graphic matrix from a remote server.
Args:
url: remote server url
Returns:
graphic matrix
"""
async with ClientSession() as session:
async with session.get(url) as resp:
resp.raise_for_status()
return await resp.text()
def traverse_matrix(source_matrix: List[List[int]]) -> List[int]:
"""Collect matrix values.
Traverse in a spiral counterclockwise,
starting from the upper left corner
Args:
source_matrix: the matrix in list format
Returns:
result of the matrix traversal
"""
traverse: List[int] = []
matrix = list(zip(*source_matrix))
while matrix:
traverse.extend(matrix.pop(0))
matrix = list(zip(*matrix))[::-1]
return traverse
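# Worked example (illustrative): a 3x3 matrix traversed counterclockwise from the
# upper left corner.
#   traverse_matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
#   -> [1, 4, 7, 8, 9, 6, 3, 2, 5]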
def parse_matrix(graphical_matrix: str) -> List[List[int]]: # noqa: WPS210
"""Parse graphical matrix values.
Args:
graphical_matrix: graphical representation of the matrix
Returns:
the matrix in list format
Raises:
FormatMatrixExceptions: exception of matrix source format
"""
if not graphical_matrix:
raise FormatMatrixExceptions('Unexpected matrix format')
matrix_lines = graphical_matrix.split('\n')
upper_borderline = matrix_lines[0]
size = len([char for char in upper_borderline.split('+') if char != ''])
matrix = [
[
int(char)
for char in line.split(' ')
if char.isdigit()
]
for index, line in enumerate(matrix_lines)
if index % 2 == 1 and line
]
bad_lines = list(filter(lambda line: len(line) != size, matrix))
if bad_lines or len(matrix) != size:
raise FormatMatrixExceptions('Unexpected matrix format')
return matrix
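# Example of the graphical format this parser expects (illustrative):
#   +---+---+
#   | 1 | 2 |
#   +---+---+
#   | 3 | 4 |
#   +---+---+
# parse_matrix on the text above returns [[1, 2], [3, 4]].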
|
mindsdb_version="0.8.3" |
import json
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Union
import numpy as np
from tqdm import tqdm
from upolygon import draw_polygon
import darwin.datatypes as dt
from darwin.config import Config
from darwin.exceptions import OutdatedDarwinJSONFormat, UnsupportedFileType
SUPPORTED_IMAGE_EXTENSIONS = [".png", ".jpeg", ".jpg", ".jfif", ".tif", ".tiff", ".bmp", ".svs"]
SUPPORTED_VIDEO_EXTENSIONS = [".avi", ".bpm", ".dcm", ".mov", ".mp4"]
SUPPORTED_EXTENSIONS = SUPPORTED_IMAGE_EXTENSIONS + SUPPORTED_VIDEO_EXTENSIONS
def is_extension_allowed(extension):
return extension.lower() in SUPPORTED_EXTENSIONS
def is_image_extension_allowed(extension):
return extension.lower() in SUPPORTED_IMAGE_EXTENSIONS
def is_video_extension_allowed(extension):
return extension.lower() in SUPPORTED_VIDEO_EXTENSIONS
if TYPE_CHECKING:
from darwin.client import Client
def urljoin(*parts: str) -> str:
"""Take as input an unpacked list of strings and joins them to form an URL"""
return "/".join(part.strip("/") for part in parts)
def is_project_dir(project_path: Path) -> bool:
"""Verifies if the directory is a project from Darwin by inspecting its sturcture
Parameters
----------
project_path : Path
Directory to examine
Returns
-------
bool
Is the directory a project from Darwin?
"""
return (project_path / "releases").exists() and (project_path / "images").exists()
def is_deprecated_project_dir(project_path: Path) -> bool:
"""Verifies if the directory is a project from Darwin that uses a deprecated local structure
Parameters
----------
project_path : Path
Directory to examine
Returns
-------
bool
Is the directory a project from Darwin?
"""
return (project_path / "annotations").exists() and (project_path / "images").exists()
def get_progress_bar(array: List, description: Optional[str] = None):
pbar = tqdm(array)
pbar.set_description(desc=description, refresh=True)
return pbar
def prompt(msg: str, default: Optional[str] = None) -> str:
"""Prompt the user on a CLI to input a message
Parameters
----------
msg : str
Message to print
default : str
Default value, which is put between [] when the user is prompted
Returns
-------
str
The input from the user or the default value provided as parameter if user does not provide one
"""
if default:
msg = f"{msg} [{default}]: "
else:
msg = f"{msg}: "
result = input(msg)
if not result and default:
return default
return result
def find_files(
files: List[Union[str, Path]] = [], recursive: bool = True, files_to_exclude: List[Union[str, Path]] = []
) -> List[Path]:
"""Retrieve a list of all files belonging to supported extensions. The exploration can be made
recursive and a list of files can be excluded if desired.
Parameters
----------
files: List[Union[str, Path]]
List of files that will be filtered with the supported file extensions and returned
recursive : bool
Flag for recursive search
files_to_exclude : List[Union[str, Path]]
List of files to exclude from the search
Returns
-------
list[Path]
List of all files belonging to supported extensions. Can't return None.
"""
# Init the return value
found_files = []
pattern = "**/*" if recursive else "*"
for path in map(Path, files):
if path.is_dir():
found_files.extend([f for f in path.glob(pattern) if is_extension_allowed(f.suffix)])
elif is_extension_allowed(path.suffix):
found_files.append(path)
else:
raise UnsupportedFileType(path)
# Filter the list and return it
files_to_exclude = set(files_to_exclude)
return [f for f in found_files if f.name not in files_to_exclude and str(f) not in files_to_exclude]
def secure_continue_request() -> bool:
"""Asks for explicit approval from the user. Empty string not accepted"""
return input("Do you want to continue? [y/N] ") in ["Y", "y"]
def persist_client_configuration(
client: "Client", default_team: Optional[str] = None, config_path: Optional[Path] = None
) -> Config:
"""Authenticate user against the server and creates a configuration file for it
Parameters
----------
client : Client
Client to take the configurations from
config_path : Path
Optional path to specify where to save the configuration file
Returns
-------
Config
A configuration object to handle YAML files
"""
if not config_path:
config_path = Path.home() / ".darwin" / "config.yaml"
config_path.parent.mkdir(exist_ok=True)
team_config = client.config.get_default_team()
config = Config(config_path)
config.set_team(team=team_config["slug"], api_key=team_config["api_key"], datasets_dir=team_config["datasets_dir"])
config.set_global(api_endpoint=client.url, base_url=client.base_url, default_team=default_team)
return config
def get_local_filename(metadata: dict):
return metadata["filename"]
def parse_darwin_json(path: Union[str, Path], count: int):
path = Path(path)
with path.open() as f:
data = json.load(f)
if not data["annotations"]:
return None
if "fps" in data["image"] or "frame_count" in data["image"]:
return parse_darwin_video(path, data, count)
else:
return parse_darwin_image(path, data, count)
def parse_darwin_image(path, data, count):
annotations = list(filter(None, map(parse_darwin_annotation, data["annotations"])))
annotation_classes = set([annotation.annotation_class for annotation in annotations])
return dt.AnnotationFile(
path,
get_local_filename(data["image"]),
annotation_classes,
annotations,
False,
data["image"]["width"],
data["image"]["height"],
data["image"]["url"],
data["image"].get("workview_url"),
data["image"].get("seq", count),
)
def parse_darwin_video(path, data, count):
annotations = list(filter(None, map(parse_darwin_video_annotation, data["annotations"])))
annotation_classes = set([annotation.annotation_class for annotation in annotations])
if "width" not in data["image"] or "height" not in data["image"]:
raise OutdatedDarwinJSONFormat("Missing width/height in video, please re-export")
return dt.AnnotationFile(
path,
get_local_filename(data["image"]),
annotation_classes,
annotations,
True,
data["image"]["width"],
data["image"]["height"],
data["image"]["url"],
data["image"].get("workview_url"),
data["image"].get("seq", count),
data["image"]["frame_urls"],
)
def parse_darwin_annotation(annotation: dict):
name = annotation["name"]
main_annotation = None
if "polygon" in annotation:
if "additional_paths" in annotation["polygon"]:
paths = [annotation["polygon"]["path"]] + annotation["polygon"]["additional_paths"]
main_annotation = dt.make_complex_polygon(name, paths)
else:
main_annotation = dt.make_polygon(name, annotation["polygon"]["path"])
elif "complex_polygon" in annotation:
if "additional_paths" in annotation["complex_polygon"]:
paths = annotation["complex_polygon"]["path"] + annotation["complex_polygon"]["additional_paths"]
main_annotation = dt.make_complex_polygon(name, paths)
else:
main_annotation = dt.make_complex_polygon(name, annotation["complex_polygon"]["path"])
elif "bounding_box" in annotation:
bounding_box = annotation["bounding_box"]
main_annotation = dt.make_bounding_box(
name, bounding_box["x"], bounding_box["y"], bounding_box["w"], bounding_box["h"]
)
elif "tag" in annotation:
main_annotation = dt.make_tag(name)
elif "line" in annotation:
main_annotation = dt.make_line(name, annotation["line"]["path"])
elif "keypoint" in annotation:
main_annotation = dt.make_keypoint(name, annotation["keypoint"]["x"], annotation["keypoint"]["y"])
elif "ellipse" in annotation:
main_annotation = dt.make_ellipse(name, annotation["ellipse"])
elif "cuboid" in annotation:
main_annotation = dt.make_cuboid(name, annotation["cuboid"])
# TODO
# elif "skeleton" in annotation:
# main_annotation = dt.make_skeleton(name, annotation["skeleton"]["nodes"])
if not main_annotation:
print(f"[WARNING] Unsupported annotation type: '{annotation.keys()}'")
return None
if "instance_id" in annotation:
main_annotation.subs.append(dt.make_instance_id(annotation["instance_id"]["value"]))
if "attributes" in annotation:
main_annotation.subs.append(dt.make_attributes(annotation["attributes"]))
if "text" in annotation:
main_annotation.subs.append(dt.make_text(annotation["text"]["text"]))
return main_annotation
def parse_darwin_video_annotation(annotation: dict):
name = annotation["name"]
frame_annotations = {}
keyframes = {}
for f, frame in annotation["frames"].items():
frame_annotations[int(f)] = parse_darwin_annotation({**frame, **{"name": name}})
keyframes[int(f)] = frame.get("keyframe", False)
return dt.make_video_annotation(frame_annotations, keyframes, annotation["segments"], annotation.get("interpolated", False))
def split_video_annotation(annotation):
if not annotation.is_video:
raise AttributeError("this is not a video annotation")
frame_annotations = []
for i, frame_url in enumerate(annotation.frame_urls):
annotations = [a.frames[i] for a in annotation.annotations if i in a.frames]
annotation_classes = set([annotation.annotation_class for annotation in annotations])
filename = f"{Path(annotation.filename).stem}/{i:07d}.jpg"
frame_annotations.append(
dt.AnnotationFile(
annotation.path,
filename,
annotation_classes,
annotations,
False,
annotation.image_width,
annotation.image_height,
frame_url,
annotation.workview_url,
annotation.seq,
)
)
return frame_annotations
def ispolygon(annotation):
return annotation.annotation_type in ["polygon", "complex_polygon"]
def convert_polygons_to_sequences(
polygons: List, height: Optional[int] = None, width: Optional[int] = None, rounding: bool = True
) -> List:
"""
Converts a list of polygons, encoded as a list of dictionaries, into a list of nd.arrays
of coordinates.
Parameters
----------
polygons: list
List of coordinates in the format [{x: x1, y:y1}, ..., {x: xn, y:yn}] or a list of them
as [[{x: x1, y:y1}, ..., {x: xn, y:yn}], ..., [{x: x1, y:y1}, ..., {x: xn, y:yn}]].
height: int
Maximum height for a polygon coordinate
width: int
Maximum width for a polygon coordinate
Returns
-------
sequences: list[ndarray[float]]
List of arrays of coordinates in the format [[x1, y1, x2, y2, ..., xn, yn], ...,
[x1, y1, x2, y2, ..., xn, yn]]
"""
if not polygons:
raise ValueError("No polygons provided")
# If there is a single polygon composing the instance then this is
# transformed to polygons = [[{x: x1, y:y1}, ..., {x: xn, y:yn}]]
if isinstance(polygons[0], dict):
polygons = [polygons]
if not isinstance(polygons[0], list) or not isinstance(polygons[0][0], dict):
raise ValueError("Unknown input format")
sequences = []
for polygon in polygons:
path = []
for point in polygon:
# Clip coordinates to the image size
x = max(min(point["x"], width - 1) if width else point["x"], 0)
y = max(min(point["y"], height - 1) if height else point["y"], 0)
if rounding:
path.append(round(x))
path.append(round(y))
else:
path.append(x)
path.append(y)
sequences.append(path)
return sequences
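# Illustrative example (coordinates are assumptions): one polygon given as point
# dictionaries is flattened into a single x/y-interleaved sequence.
#   convert_polygons_to_sequences([{"x": 1.2, "y": 2.7}, {"x": 3.0, "y": 4.4}])
#   -> [[1, 3, 3, 4]]  # rounding=True by default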
def convert_sequences_to_polygons(sequences: List, height: Optional[int] = None, width: Optional[int] = None) -> List:
"""
Converts a list of sequences of coordinates into a list of polygons, encoded as
lists of point dictionaries.
Parameters
----------
sequences: list
List of arrays of coordinates in the format [x1, y1, x2, y2, ..., xn, yn] or as a list of them
as [[x1, y1, x2, y2, ..., xn, yn], ..., [x1, y1, x2, y2, ..., xn, yn]]
height: int
Maximum height for a polygon coordinate
width: int
Maximum width for a polygon coordinate
Returns
-------
polygons: dict
Dictionary with a "path" key holding the coordinates in the format
[[{x: x1, y:y1}, ..., {x: xn, y:yn}], ..., [{x: x1, y:y1}, ..., {x: xn, y:yn}]].
"""
if not sequences:
raise ValueError("No sequences provided")
# If there is a single sequences composing the instance then this is
# transformed to polygons = [[x1, y1, ..., xn, yn]]
if not isinstance(sequences[0], list):
sequences = [sequences]
if not isinstance(sequences[0][0], (int, float)):
raise ValueError("Unknown input format")
def grouped(iterable, n):
return zip(*[iter(iterable)] * n)
polygons = []
for sequence in sequences:
path = []
for x, y in grouped(sequence, 2):
# Clip coordinates to the image size
x = max(min(x, width - 1) if width else x, 0)
y = max(min(y, height - 1) if height else y, 0)
path.append({"x": x, "y": y})
polygons.append(path)
return {"path": polygons}
def convert_xyxy_to_bounding_box(box: List) -> dict:
"""
Converts a list of xy coordinates representing a bounding box into a dictionary
Parameters
----------
box: list
List of arrays of coordinates in the format [x1, y1, x2, y2]
Returns
-------
bounding_box: dict
Bounding box in the format {x: x1, y: y1, h: height, w: width}
"""
if not isinstance(box[0], (int, float)):
raise ValueError("Unknown input format")
x1, y1, x2, y2 = box
width = x2 - x1
height = y2 - y1
return {"x": x1, "y": y1, "w": width, "h": height}
def convert_bounding_box_to_xyxy(box: dict) -> list:
"""
Converts dictionary representing a bounding box into a list of xy coordinates
Parameters
----------
box: dict
Bounding box in the format {x: x1, y: y1, h: height, w: width}
Returns
-------
bounding_box: list
List of arrays of coordinates in the format [x1, y1, x2, y2]
"""
x2 = box["x"] + box["width"]
y2 = box["y"] + box["height"]
return [box["x"], box["y"], x2, y2]
def convert_polygons_to_mask(polygons: List, height: int, width: int, value: Optional[int] = 1) -> np.ndarray:
"""
Converts a list of polygons, encoded as a list of dictionaries into an nd.array mask
Parameters
----------
polygons: list
List of coordinates in the format [{x: x1, y:y1}, ..., {x: xn, y:yn}] or a list of them
as [[{x: x1, y:y1}, ..., {x: xn, y:yn}], ..., [{x: x1, y:y1}, ..., {x: xn, y:yn}]].
Returns
-------
mask: ndarray[float]
ndarray mask of the polygon(s)
"""
sequence = convert_polygons_to_sequences(polygons, height=height, width=width)
mask = np.zeros((height, width)).astype(np.uint8)
draw_polygon(mask, sequence, value)
return mask
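# Illustrative usage (shape and coordinates are assumptions): rasterise a triangle
# into a 10x10 uint8 mask whose inside pixels are set to 1.
#   mask = convert_polygons_to_mask(
#       [{"x": 1, "y": 1}, {"x": 8, "y": 1}, {"x": 8, "y": 8}], height=10, width=10)
#   mask.shape  # (10, 10)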
|
input = """
x | -f(1).
x | f(1).
"""
output = """
x | -f(1).
x | f(1).
"""
|
from rest_framework import status, response
__globals = globals()
def __status_name_filter(name):
return name.startswith('HTTP_')
class BaseResponseClass(response.Response):
"""
API response class with default status code.
:var status_code: HTTP status code.
:vartype status_code: int
:ivar timings: Response timings.
:vartype timings: int,None
:param timings: Response timings.
:vartype timings: int,None
"""
__slots__ = ('data', 'timings')
def __init__(self, *args, **kwargs):
self.timings = kwargs.pop('timings', None)
super().__init__(*args, **kwargs)
if isinstance(self.data, str):
self.data = {'detail': self.data}
for __status_name in filter(__status_name_filter, dir(status)):
__status_code = getattr(status, __status_name)
__response_name = f'Response{__status_code}'
__http_response_name = __status_name
__globals[__response_name] = type(
__response_name,
(BaseResponseClass,),
{"status_code": __status_code, "__slots__": ()}
)
__globals[__http_response_name] = __globals[__response_name]
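# Illustrative usage (a minimal sketch; assumes rest_framework defines
# status.HTTP_201_CREATED = 201): the loop above generates classes such as
# Response201 (also aliased as HTTP_201_CREATED) with the status code pre-set.
#   resp = Response201({"id": 1}, timings=12)
#   resp.status_code  # 201
#   resp.timings      # 12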
|
import json
import subprocess
import time
import os
from json import JSONDecodeError
import requests
from util import build_url
from util.configutils import set_default_config_if_not_exists
""" This script performs an update of earkweb. This includes:
* db migrations
* updates to Solr schema
* updating existing/adding new Celery tasks
"""
# check for missing configuration options
default_config = [
("server", "flower_server_external", "127.0.0.1"),
("server", "flower_server_internal", "127.0.0.1"),
("server", "flower_port", "5555"),
("server", "flower_path", "/"),
]
from config.configuration import solr_field_list, solr_copy_fields, solr_config_changes, root_dir, solr_protocol, \
solr_service_url, solr_core_url
set_default_config_if_not_exists(os.path.join(root_dir, 'settings/settings.cfg'), default_config)
# colour codes
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# # requirements.txt: install new requirements
# print '\033[95m' + '----------------\nNow updating from requirements.txt.\n----------------' + '\033[0m'
# req_update_args = ['pip', 'install', '-r', 'requirements.txt']
# update_process = os.subprocess.Popen(req_update_args)
# update_out, update_err = update_process.communicate()
# if update_err is not None:
# print WARNING + 'There have been errors when updating from requirements.txt:\n' + '\033[0m'
# print update_err
# migrations - prepare
#cwd = "/earkweb" if os.path.exists("/earkweb") else root_dir
#print(HEADER + '----------------\nNow preparing database migrations.\n----------------' + ENDC)
#migrations_update_args = ['python', 'manage.py', 'makemigrations', 'earkweb']
#migrations_process = os.subprocess.Popen(migrations_update_args, cwd=cwd)
#migrations_out, migrations_err = migrations_process.communicate()
#if migrations_err is not None:
# print(WARNING + 'There have been errors when performing "earkweb" migrations:\n' + ENDC)
# print(migrations_err)
#migrations_update_args = ['python', 'manage.py', 'makemigrations']
#migrations_process = os.subprocess.Popen(migrations_update_args, cwd=cwd)
#migrations_out, migrations_err = migrations_process.communicate()
#if migrations_err is not None:
# print(WARNING + 'There have been errors when performing migrations:\n' + ENDC)
# print(migrations_err)
# migrations - apply
#print(HEADER + '----------------\nNow applying database migrations.\n----------------' + ENDC)
#migrations_update_args = ['python', 'manage.py', 'migrate']
#migrations_process = os.subprocess.Popen(migrations_update_args, cwd=cwd)
#migrations_out, migrations_err = migrations_process.communicate()
#if migrations_err is not None:
# print(WARNING + 'There have been errors when performing migrations:\n' + ENDC)
# print(migrations_err)
# scan for new/updated Celery tasks
#print(HEADER + '----------------\nNow scanning for new/updated Celery tasks.\n----------------' + ENDC)
#taskscan_args = ['python', 'taskbackend/scantasks.py']
#taskscan_process = os.subprocess.Popen(taskscan_args, cwd=cwd)
#taskscan_out, taskscan_err = taskscan_process.communicate()
#if taskscan_err is not None:
# print(WARNING + + 'There have been errors when updating Celery tasks:\n' + ENDC)
# print(taskscan_err)
from config.configuration import solr_core
ping_solr = requests.get("%s" % solr_service_url)
if ping_solr.status_code != 200:
print(FAIL + 'SolR service is not available at: %s\n' % solr_service_url + ENDC)
else:
ping_resp = requests.get("%s/admin/ping" % solr_core_url)
if ping_resp.status_code == 200:
try:
solr_status_resp = json.loads(ping_resp.text)
if "status" in solr_status_resp and solr_status_resp["status"] == "OK":
print(OKGREEN + ('SolR core "%s" available.\n' % solr_core) + ENDC)
else:
print(FAIL + ('SolR core "%s" is not available.\n' % solr_core) + ENDC)
raise ValueError()
except JSONDecodeError as err:
print(FAIL + 'Error parsing response message.\n' + ENDC)
raise ValueError()
# Solr: create new fields
print(HEADER + '----------------\nNow adding new Solr fields.\n----------------' + ENDC)
for field in solr_field_list:
print(OKBLUE + '## Adding new field: %s ##' % field['name'] + ENDC)
# simple field with name, type, stored parameter
solr_fields_args = ['curl', '-X', 'POST', '-H', '\'Content-type:application/json\'',
'--data-binary', '{"add-field": {"name": "%s", "type": "%s", "stored": "%s"}}' % (field['name'], field['type'], field['stored']),
'%s/schema' % solr_core_url]
try:
# check if 'indexed' is set (additional to parameters above)
if field['indexed']:
solr_fields_args = ['curl', '-X', 'POST', '-H', '\'Content-type:application/json\'',
'--data-binary', '{"add-field": {"name": "%s", "type": "%s", "stored": "%s", "indexed": "%s"}}' % (field['name'], field['type'], field['stored'], field['indexed']), '%s/schema' % solr_core_url]
except KeyError:
# expected behaviour if 'indexed' is not set
pass
solr_fields_process = subprocess.Popen(solr_fields_args)
solr_fields_out, solr_fields_err = solr_fields_process.communicate()
if solr_fields_err is not None:
print(WARNING + 'There have been errors when updating Solr fields:\n' + ENDC)
print(solr_fields_err)
time.sleep(2.5)
for field in solr_copy_fields:
print(OKBLUE + '## Adding new copy-field: from %s to %s ##' % (field['source'], field['dest']) + ENDC)
solr_fields_args = ['curl', '-X', 'POST', '-H', '\'Content-type:application/json\'',
'--data-binary', '{"add-copy-field": {"source": "%s", "dest": "%s"}}' % (field['source'], field['dest']),
'%s/schema' % solr_core_url]
solr_fields_process = subprocess.Popen(solr_fields_args)
solr_fields_out, solr_fields_err = solr_fields_process.communicate()
if solr_fields_err is not None:
print(WARNING + 'There have been errors when updating Solr fields:\n' + ENDC)
print(solr_fields_err)
print(HEADER + '----------------\nNow editing the Solr config.\n----------------' + ENDC)
for change in solr_config_changes:
print(OKBLUE + '## Editing class: %s ##' % change['class'] + ENDC)
solr_config_args = ['curl', '%s/config' % solr_core_url, '-H', '\'Content-type:application/json\'',
'-d', '{"%s":{"name":"%s", "class":"%s", "defaults": %s}}' %
(change['type'], change['path'], change['class'], change['fields'])]
solr_change_process = subprocess.Popen(solr_config_args)
solr_change_out, solr_change_err = solr_change_process.communicate()
if solr_change_err is not None:
print(WARNING + 'There have been errors when updating the Solr config file:\n' + ENDC)
print(solr_change_err)
|
import tensorflow as tf
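# Minimal TF1 gradient-descent demo: the loss below is (w + 1)^2, so repeated
# training steps should move w from its initial value of 5.0 toward the optimum -1.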
w = tf.Variable(tf.constant(5.0))
loss = tf.square(w+1)
train_step = tf.train.GradientDescentOptimizer(0.0001).minimize(loss)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
for _ in range(40):
sess.run(train_step)
print('loss value: ', sess.run(loss), ' w value: ', sess.run(w))
|
"""
Classes for all SDSS_Catl_utils-specific exceptions
"""
__all__ = ["SDSSCatlUtils_Error"]
class SDSSCatlUtils_Error(Exception):
"""Base class of all LSS_Utils-specific exceptions"""
def __init__(self, message):
super(SDSSCatlUtils_Error, self).__init__(message)
|
"""Module for transforming lists of numbers into formatted ranges
and formatted ranges into lists of numbers
pylint rated 9.68/10
"""
from globalconstants import EMPTYCHAR, DASH, COMMA, BLANK, COMMABLANK, SLASH, LONGDASH
from indexclass import Index
from indexutilities import index_reduce
def split_up_range(string,
seg_length=5):
"""splits up the string with search result
output to allow it to be formatted for display
"""
returnlist = []
l_temp = string.split(COMMA)
if len(l_temp) < seg_length:
return [COMMA.join(l_temp)]
multip = int(len(l_temp)/seg_length)
rem = len(l_temp)-(multip*seg_length)
for a_temp in range(multip):
returnlist.append(COMMA.join(l_temp[a_temp*seg_length:
(a_temp+1)*seg_length-1]))
returnlist.append(COMMA.join
(l_temp[multip*seg_length :
multip*seg_length+rem-1]))
return returnlist
def de_range(range_string):
"""Takes a single formatted range and returns a list
"""
if DASH not in range_string:
return [int(range_string)]
try:
starting_number = int(range_string.split(DASH)[0])
ending_number = int(range_string.split(DASH)[1])
return list(range(starting_number, ending_number+1))
except (ValueError, IndexError):
return []  # malformed range: return an empty list instead of None
def range_set(entrystring):
"""takes a string with a sequence of ranges and returns
list"""
rangeset = set()
if entrystring == EMPTYCHAR:
return []
if DASH not in entrystring and COMMA not in entrystring:
return [int(entrystring)]
for e_temp in [e_temp.strip() for e_temp in entrystring.split(COMMA) if
(e_temp.isnumeric() or e_temp.replace(DASH, EMPTYCHAR).isnumeric())]:
rangeset = rangeset.union(set(de_range(e_temp)))
return rangeset
def range_find(pageset,reduce=True,compact=True,breaker=', '):
"""Tranforms a list of pages into a formatted range
Reduce to give indexes in a reduced form!
"""
if compact:
def integer_part (x):
if '.' in str(x):
return int(str(x).split('.')[0])
else:
return int(x)
pages = set()
pair_list = []
integer_pages = set()
for page in pageset:
ip_temp = integer_part(page)
integer_pages.add(ip_temp)
all_indexes = sorted(integer_pages)
del integer_pages
try:
starting = all_indexes[0]
except:
starting = 0
ending = starting
if len(all_indexes)>0:
for ind in all_indexes[1:]:
if ind == ending + 1:
ending = ind
elif ind > ending + 1:
pair_list.append((starting,ending))
starting = ind
ending = ind
else:
pass
if (len(pair_list)>0 and pair_list[-1] != (starting,ending)) or len(pair_list) == 0:
pair_list.append((starting,ending))
result = ''
for pair in pair_list:
starting,ending = pair[0],pair[1]
if ending>starting:
result+=str(starting)+LONGDASH+str(ending)+breaker
else:
result+=str(starting)+breaker
if len(result)>len(breaker):
return result[0:-len(breaker)]
else:
return ''
else:
return ''
pagerangelist = []
for page in sorted(pageset):
if isinstance(page,int):
if page in pageset and page-1 in pageset:
pagerangelist[-1].append(str(page))
elif page in pageset and not page-1 in pageset:
pagerangelist.append([str(page)])
elif isinstance(page,str):
if page.isnumeric():
if page in pageset and str(int(page)-1) in pageset:
pagerangelist[-1].append(str(page))
elif page in pageset and not str(int(page)-1) in pageset:
pagerangelist.append([str(page)])
elif type(page) == type(Index(0)):
if page in pageset and page-Index(1) in pageset:
pagerangelist[-1].append(str(page))
elif page in pageset and not page-Index(1) in pageset:
pagerangelist.append([str(page)])
def redux (x):
if reduce:
return index_reduce(x)
else:
return x
pagerangestringlist = []
for pagerange in pagerangelist:
if len(pagerange) == 1:
pagerangestringlist.append(redux(str(pagerange[0])))
else:
pagerangestringlist.append(redux(str(pagerange[0]))
+LONGDASH+redux(str(pagerange[-1])))
return COMMABLANK.join(pagerangestringlist)
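# Illustrative examples (assuming the globalconstants are the usual literals:
# DASH='-', COMMA=',', COMMABLANK=', ', LONGDASH a long dash):
#   de_range('3-6')             -> [3, 4, 5, 6]
#   range_set('1-3, 7')         -> {1, 2, 3, 7}
#   range_find({1, 2, 3, 7, 8}) -> '1' + LONGDASH + '3, 7' + LONGDASH + '8'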
|
import numpy as np
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_label_indicator_matrix
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import is_sequence_of_sequences
from sklearn.utils.multiclass import type_of_target
EXAMPLES = {
'multilabel-indicator': [
np.random.RandomState(42).randint(2, size=(10, 10)),
np.array([[0, 1], [1, 0]]),
np.array([[0, 1], [1, 0]], dtype=np.bool),
np.array([[0, 1], [1, 0]], dtype=np.int8),
np.array([[0, 1], [1, 0]], dtype=np.uint8),
np.array([[0, 1], [1, 0]], dtype=np.float),
np.array([[0, 1], [1, 0]], dtype=np.float32),
np.array([[0, 0], [0, 0]]),
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
np.array([[0, 1]]),
],
'multilabel-sequences': [
[[0, 1]],
[[0], [1]],
[[1, 2, 3]],
[[1, 2, 1]], # duplicate values, why not?
[[1], [2], [0, 1]],
[[1], [2]],
[[]],
[()],
np.array([[], [1, 2]], dtype='object'),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[1, 0, 2]]),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
[u'a', u'b'],
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
# not currently supported sequence of sequences
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabels
assert_array_equal(unique_labels([(0, 1, 2), (0,), tuple(), (2, 1)]),
np.arange(3))
assert_array_equal(unique_labels([[0, 1, 2], [0], list(), [2, 1]]),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
# Some tests with strings input
assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
["a", "b", "c", "d"])
assert_array_equal(unique_labels([["a", "b"], ["c"]], [["d"]]),
["a", "b", "c", "d"])
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-sequences",
"multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
#Mix of multilabel-indicator and multilabel-sequences
mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multilabel-sequences"])
for y_multilabel, y_multiclass in mix_multilabel_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
#Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
EXAMPLES["multilabel-sequences"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix string and number input type
assert_raises(ValueError, unique_labels, [[1, 2], [3]],
[["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [3]])
assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
["0", "2"])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group.startswith('multilabel'):
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s' % (example, exp))
def test_is_label_indicator_matrix():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-indicator':
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_label_indicator_matrix(example),
msg='is_label_indicator_matrix(%r) should be %s'
% (example, exp))
def test_is_sequence_of_sequences():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-sequences':
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_sequence_of_sequences(example),
msg='is_sequence_of_sequences(%r) should be %s'
% (example, exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg='type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example)))
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, type_of_target, example)
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re, os
from frappe.utils.pdf import get_pdf
from frappe.email.smtp import get_outgoing_email_account
from frappe.utils import (get_url, scrub_urls, strip, expand_relative_urls, cint,
split_emails, to_markdown, markdown, random_string, parse_addr)
import email.utils
from six import iteritems, text_type, string_types, PY3
from email.mime.multipart import MIMEMultipart
from email.header import Header
if PY3:
from email import policy
def get_email(recipients, sender='', msg='', subject='[No Subject]',
text_content = None, footer=None, print_html=None, formatted=None, attachments=None,
content=None, reply_to=None, cc=[], bcc=[], email_account=None, expose_recipients=None,
inline_images=[], header=None):
""" Prepare an email with the following format:
- multipart/mixed
- multipart/alternative
- text/plain
- multipart/related
- text/html
- inline image
- attachment
"""
content = content or msg
emailobj = EMail(sender, recipients, subject, reply_to=reply_to, cc=cc, bcc=bcc, email_account=email_account, expose_recipients=expose_recipients)
if not content.strip().startswith("<"):
content = markdown(content)
emailobj.set_html(content, text_content, footer=footer, header=header,
print_html=print_html, formatted=formatted, inline_images=inline_images)
if isinstance(attachments, dict):
attachments = [attachments]
for attach in (attachments or []):
# cannot attach if no filecontent
if attach.get('fcontent') is None: continue
emailobj.add_attachment(**attach)
return emailobj
class EMail:
"""
Wrapper on the email module. Email object represents emails to be sent to the client.
Also provides a clean way to add binary `FileData` attachments
Also sets all messages as multipart/alternative for cleaner reading in text-only clients
"""
def __init__(self, sender='', recipients=(), subject='', alternative=0, reply_to=None, cc=(), bcc=(), email_account=None, expose_recipients=None):
from email import charset as Charset
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
if isinstance(recipients, string_types):
recipients = recipients.replace(';', ',').replace('\n', '')
recipients = split_emails(recipients)
# remove null
recipients = filter(None, (strip(r) for r in recipients))
self.sender = sender
self.reply_to = reply_to or sender
self.recipients = recipients
self.subject = subject
self.expose_recipients = expose_recipients
if PY3:
self.msg_root = MIMEMultipart('mixed', policy=policy.SMTPUTF8)
self.msg_alternative = MIMEMultipart('alternative', policy=policy.SMTPUTF8)
else:
self.msg_root = MIMEMultipart('mixed')
self.msg_alternative = MIMEMultipart('alternative')
self.msg_root.attach(self.msg_alternative)
self.cc = cc or []
self.bcc = bcc or []
self.html_set = False
self.email_account = email_account or get_outgoing_email_account(sender=sender)
def set_html(self, message, text_content = None, footer=None, print_html=None,
formatted=None, inline_images=None, header=None):
"""Attach message in the html portion of multipart/alternative"""
if not formatted:
formatted = get_formatted_html(self.subject, message, footer, print_html,
email_account=self.email_account, header=header, sender=self.sender)
# this is the first html part of a multi-part message,
# convert to text as well
if not self.html_set:
if text_content:
self.set_text(expand_relative_urls(text_content))
else:
self.set_html_as_text(expand_relative_urls(formatted))
self.set_part_html(formatted, inline_images)
self.html_set = True
def set_text(self, message):
"""
Attach message in the text portion of multipart/alternative
"""
from email.mime.text import MIMEText
if PY3:
part = MIMEText(message, 'plain', 'utf-8', policy=policy.SMTPUTF8)
else:
part = MIMEText(message, 'plain', 'utf-8')
self.msg_alternative.attach(part)
def set_part_html(self, message, inline_images):
from email.mime.text import MIMEText
has_inline_images = re.search('''embed=['"].*?['"]''', message)
if has_inline_images:
# process inline images
message, _inline_images = replace_filename_with_cid(message)
# prepare parts
if PY3:
msg_related = MIMEMultipart('related', policy=policy.SMTPUTF8)
html_part = MIMEText(message, 'html', 'utf-8', policy=policy.SMTPUTF8)
else:
msg_related = MIMEMultipart('related')
html_part = MIMEText(message, 'html', 'utf-8')
msg_related.attach(html_part)
for image in _inline_images:
self.add_attachment(image.get('filename'), image.get('filecontent'),
content_id=image.get('content_id'), parent=msg_related, inline=True)
self.msg_alternative.attach(msg_related)
else:
if PY3:
self.msg_alternative.attach(MIMEText(message, 'html', 'utf-8', policy=policy.SMTPUTF8))
else:
self.msg_alternative.attach(MIMEText(message, 'html', 'utf-8'))
def set_html_as_text(self, html):
"""Set plain text from HTML"""
self.set_text(to_markdown(html))
def set_message(self, message, mime_type='text/html', as_attachment=0, filename='attachment.html'):
"""Append the message with MIME content to the root node (as attachment)"""
from email.mime.text import MIMEText
maintype, subtype = mime_type.split('/')
if PY3:
part = MIMEText(message, _subtype = subtype, policy=policy.SMTPUTF8)
else:
part = MIMEText(message, _subtype = subtype)
if as_attachment:
part.add_header('Content-Disposition', 'attachment', filename=filename)
self.msg_root.attach(part)
def attach_file(self, n):
"""attach a file from the `FileData` table"""
_file = frappe.get_doc("File", {"file_name": n})
content = _file.get_content()
if not content:
return
self.add_attachment(_file.file_name, content)
def add_attachment(self, fname, fcontent, content_type=None,
parent=None, content_id=None, inline=False):
"""add attachment"""
if not parent:
parent = self.msg_root
add_attachment(fname, fcontent, content_type, parent, content_id, inline)
def add_pdf_attachment(self, name, html, options=None):
self.add_attachment(name, get_pdf(html, options), 'application/octet-stream')
def validate(self):
"""validate the Email Addresses"""
from frappe.utils import validate_email_address
if not self.sender:
self.sender = self.email_account.default_sender
validate_email_address(strip(self.sender), True)
self.reply_to = validate_email_address(strip(self.reply_to) or self.sender, True)
self.replace_sender()
self.replace_sender_name()
self.recipients = [strip(r) for r in self.recipients]
self.cc = [strip(r) for r in self.cc]
self.bcc = [strip(r) for r in self.bcc]
for e in self.recipients + (self.cc or []) + (self.bcc or []):
validate_email_address(e, True)
def replace_sender(self):
if cint(self.email_account.always_use_account_email_id_as_sender):
self.set_header('X-Original-From', self.sender)
sender_name, sender_email = parse_addr(self.sender)
self.sender = email.utils.formataddr((str(Header(sender_name or self.email_account.name, 'utf-8')), self.email_account.email_id))
def replace_sender_name(self):
if cint(self.email_account.always_use_account_name_as_sender_name):
self.set_header('X-Original-From', self.sender)
sender_name, sender_email = parse_addr(self.sender)
self.sender = email.utils.formataddr((str(Header(self.email_account.name, 'utf-8')), sender_email))
def set_message_id(self, message_id, is_notification=False):
if message_id:
self.msg_root["Message-Id"] = '<' + message_id + '>'
else:
self.msg_root["Message-Id"] = get_message_id()
self.msg_root["isnotification"] = '<notification>'
if is_notification:
self.msg_root["isnotification"] = '<notification>'
def set_in_reply_to(self, in_reply_to):
"""Used to send the Message-Id of a received email back as In-Reply-To"""
self.set_header('In-Reply-To', in_reply_to)
def make(self):
"""build into msg_root"""
headers = {
"Subject": strip(self.subject),
"From": self.sender,
"To": ', '.join(self.recipients) if self.expose_recipients=="header" else "<!--recipient-->",
"Date": email.utils.formatdate(),
"Reply-To": self.reply_to if self.reply_to else None,
"CC": ', '.join(self.cc) if self.cc and self.expose_recipients=="header" else None,
'X-Frappe-Site': get_url(),
}
# reset headers as values may be changed.
for key, val in iteritems(headers):
if val:
self.set_header(key, val)
# call hook to enable apps to modify msg_root before sending
for hook in frappe.get_hooks("make_email_body_message"):
frappe.get_attr(hook)(self)
def set_header(self, key, value):
if key in self.msg_root:
del self.msg_root[key]
try:
self.msg_root[key] = value
except ValueError:
self.msg_root[key] = sanitize_email_header(value)
def as_string(self):
"""validate, build message and convert to string"""
self.validate()
self.make()
if PY3:
return self.msg_root.as_string(policy=policy.SMTPUTF8)
return self.msg_root.as_string()
def get_formatted_html(subject, message, footer=None, print_html=None,
email_account=None, header=None, unsubscribe_link=None, sender=None):
if not email_account:
email_account = get_outgoing_email_account(False, sender=sender)
rendered_email = frappe.get_template("templates/emails/standard.html").render({
"header": get_header(header),
"content": message,
"signature": get_signature(email_account),
"footer": get_footer(email_account, footer),
"title": subject,
"print_html": print_html,
"subject": subject
})
html = scrub_urls(rendered_email)
if unsubscribe_link:
html = html.replace("<!--unsubscribe link here-->", unsubscribe_link.html)
html = inline_style_in_html(html)
return html
@frappe.whitelist()
def get_email_html(template, args, subject, header=None):
import json
args = json.loads(args)
if header and header.startswith('['):
header = json.loads(header)
email = frappe.utils.jinja.get_email_from_template(template, args)
return get_formatted_html(subject, email[0], header=header)
def inline_style_in_html(html):
''' Convert email.css and html to inline-styled html
'''
from premailer import Premailer
apps = frappe.get_installed_apps()
css_files = []
for app in apps:
path = 'assets/{0}/css/email.css'.format(app)
if os.path.exists(os.path.abspath(path)):
css_files.append(path)
p = Premailer(html=html, external_styles=css_files, strip_important=False)
return p.transform()
def add_attachment(fname, fcontent, content_type=None,
parent=None, content_id=None, inline=False):
"""Add attachment to parent which must an email object"""
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import mimetypes
if not content_type:
content_type, encoding = mimetypes.guess_type(fname)
if not parent:
return
if content_type is None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
content_type = 'application/octet-stream'
maintype, subtype = content_type.split('/', 1)
if maintype == 'text':
# Note: we should handle calculating the charset
if isinstance(fcontent, text_type):
fcontent = fcontent.encode("utf-8")
part = MIMEText(fcontent, _subtype=subtype, _charset="utf-8")
elif maintype == 'image':
part = MIMEImage(fcontent, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(fcontent, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(fcontent)
# Encode the payload using Base64
from email import encoders
encoders.encode_base64(part)
# Set the filename parameter
if fname:
attachment_type = 'inline' if inline else 'attachment'
part.add_header('Content-Disposition', attachment_type, filename=text_type(fname))
if content_id:
part.add_header('Content-ID', '<{0}>'.format(content_id))
parent.attach(part)
def get_message_id():
'''Returns Message ID created from doctype and name'''
return "<{unique}@{site}>".format(
site=frappe.local.site,
unique=email.utils.make_msgid(random_string(10)).split('@')[0].split('<')[1])
def get_signature(email_account):
if email_account and email_account.add_signature and email_account.signature:
return "<br><br>" + email_account.signature
else:
return ""
def get_footer(email_account, footer=None):
"""append a footer (signature)"""
footer = footer or ""
args = {}
if email_account and email_account.footer:
args.update({'email_account_footer': email_account.footer})
company_address = frappe.db.get_default("email_footer_address")
if company_address:
args.update({'company_address': company_address})
if not cint(frappe.db.get_default("disable_standard_email_footer")):
args.update({'default_mail_footer': frappe.get_hooks('default_mail_footer')})
footer += frappe.utils.jinja.get_email_from_template('email_footer', args)[0]
return footer
def replace_filename_with_cid(message):
""" Replaces <img embed="assets/frappe/images/filename.jpg" ...> with
<img src="cid:content_id" ...> and return the modified message and
a list of inline_images with {filename, filecontent, content_id}
"""
inline_images = []
while True:
matches = re.search('''embed=["'](.*?)["']''', message)
if not matches: break
groups = matches.groups()
# found match
img_path = groups[0]
filename = img_path.rsplit('/')[-1]
filecontent = get_filecontent_from_path(img_path)
if not filecontent:
message = re.sub('''embed=['"]{0}['"]'''.format(img_path), '', message)
continue
content_id = random_string(10)
inline_images.append({
'filename': filename,
'filecontent': filecontent,
'content_id': content_id
})
message = re.sub('''embed=['"]{0}['"]'''.format(img_path),
'src="cid:{0}"'.format(content_id), message)
return (message, inline_images)
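# Illustrative transformation (content_id is random; the value shown is an assumption,
# and the referenced file must exist on disk for it to be inlined):
#   '<img embed="assets/frappe/images/logo.png">'
#   becomes '<img src="cid:a1b2c3d4e5">' with the image appended to inline_images.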
def get_filecontent_from_path(path):
if not path: return
if path.startswith('/'):
path = path[1:]
if path.startswith('assets/'):
# from public folder
full_path = os.path.abspath(path)
elif path.startswith('files/'):
# public file
full_path = frappe.get_site_path('public', path)
elif path.startswith('private/files/'):
# private file
full_path = frappe.get_site_path(path)
else:
full_path = path
if os.path.exists(full_path):
with open(full_path, 'rb') as f:
filecontent = f.read()
return filecontent
else:
return None
def get_header(header=None):
""" Build header from template """
from frappe.utils.jinja import get_email_from_template
if not header: return None
if isinstance(header, string_types):
# header = 'My Title'
header = [header, None]
if len(header) == 1:
# header = ['My Title']
header.append(None)
# header = ['My Title', 'orange']
title, indicator = header
if not title:
title = frappe.get_hooks('app_title')[-1]
email_header, text = get_email_from_template('email_header', {
'header_title': title,
'indicator': indicator
})
return email_header
def sanitize_email_header(str):
return str.replace('\r', '').replace('\n', '')
|
import json
import urllib.request
from movie_finder.models import Movie
from jackshen.settings import OMDB_API, OMDB_API_2, OMDB_LINK
from home.utils.check_progress import print_progress
print_progress(0, Movie.objects.count(), prefix="Progress:", suffix="Complete", length=50)
for i, item in enumerate(Movie.objects.order_by('-release').iterator()):
try:
json_data = (
urllib.request.urlopen(OMDB_LINK.format(imdb_id=item.imdb_id, api_key=OMDB_API))
.read()
.decode()
)
except Exception:
json_data = (
urllib.request.urlopen(OMDB_LINK.format(imdb_id=item.imdb_id, api_key=OMDB_API_2))
.read()
.decode()
)
data = json.loads(json_data)
item.votes = data["imdbVotes"].replace(",", "")
item.rating.rating = data["imdbRating"]
item.save()
print_progress(i + 1, Movie.objects.count(), prefix='Progress:', suffix='Complete', length=50)
print('Done!')
|
from os import getenv
from bot import bot
bot.run(getenv("DISCORD_TOKEN"))
|
# Copyright 2019 Nicole Borrelli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, make_response, request
from ips_util import Patch
from randomizer.flags import Flags
from randomizer.randomize import randomize
app = Flask(__name__, static_folder="static", static_url_path='')
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/patch', methods=['POST'])
def create_patch():
patch = bytearray()
filename = "patch.ips"
flags_string = request.form['flags']
rom_seed = request.form['seed']
flags = Flags()
flags.no_shuffle = flags_string.find("Op") != -1
flags.standard_shops = flags_string.find("Sv") != -1
flags.standard_treasure = flags_string.find("Tv") != -1
flags.default_start_gear = flags_string.find("Gv") != -1
flags.boss_shuffle = flags_string.find("B") != -1
xp_start = flags_string.find("Xp")
if xp_start >= 0:
xp_start += 2
xp_str = ""
while xp_start < len(flags_string) and flags_string[xp_start].isdigit():
xp_str += flags_string[xp_start]
xp_start += 1
flags.scale_levels = 1.0 / (int(xp_str) / 10.0)
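# For example, the flag fragment "Xp20" yields xp_str == "20" and
# scale_levels == 1.0 / (20 / 10.0) == 0.5.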
with open("ff-dos.gba", "rb") as rom_file:
rom_data = bytearray(rom_file.read())
randomized_rom = randomize(rom_data, rom_seed, flags)
patch = Patch.create(rom_data, randomized_rom)
response = make_response(patch.encode())
response.headers['Content-Type'] = "application/octet-stream"
response.headers['Content-Disposition'] = f"inline; filename={filename}"
return response
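# Hypothetical client-side call (host, flag string and seed are illustrative
# only; the server also needs "ff-dos.gba" alongside it):
#     curl -X POST -d "flags=OpSvB&seed=12345" http://localhost:5000/patch -o patch.ips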
|
"""
Main RenderChan package
"""
|
import scrapy
from tutorial.items import TutorialItem
class QuotesSpider(scrapy.Spider):
name = "quotes"
def start_requests(self):
urls = [
'http://icooon-mono.com/category/event-en/page/1/?lang=en',
'http://icooon-mono.com/category/event-en/page/2/?lang=en',
'http://icooon-mono.com/category/event-en/page/3/?lang=en',
'http://icooon-mono.com/category/event-en/page/4/?lang=en',
'http://icooon-mono.com/category/event-en/page/5/?lang=en',
'http://icooon-mono.com/category/event-en/page/6/?lang=en',
'http://icooon-mono.com/category/event-en/page/7/?lang=en',
'http://icooon-mono.com/category/event-en/page/8/?lang=en',
'http://icooon-mono.com/category/event-en/page/9/?lang=en',
'http://icooon-mono.com/category/event-en/page/10/?lang=en',
'http://icooon-mono.com/category/event-en/page/11/?lang=en',
'http://icooon-mono.com/category/event-en/page/12/?lang=en'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
for sel in response.xpath('//div[@id="topMaincolumn"]/ul/li/a'):
p = sel.xpath('img/@src')[0].extract()
title = sel.xpath('p/text()').extract()
link = response.urljoin(p)
cardImage = TutorialItem()
cardImage['title'] = title
cardImage['image_urls'] = [link]
yield cardImage
|
#!/usr/bin/env python
# encoding: utf-8
'''
@license: (C) Copyright 2013-2020, Node Supply Chain Manager Corporation Limited.
@time: 2021/5/10 14:57
@file: train.py
@author: baidq
@Software: PyCharm
@desc:
'''
from bert4keras.backend import keras
from bert4keras.tokenizers import Tokenizer
from bert4keras.snippets import sequence_padding, DataGenerator
from sklearn.metrics import classification_report
from bert4keras.optimizers import Adam
from nlu.intent_recg_bert.bert_model import build_bert_model
from nlu.intent_recg_bert.data_helper import load_data
# Define hyperparameters and configuration file paths
class_nums = 13
maxlen = 128
batch_size = 8
config_path = r'D:\download\Data\Bert Model\chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = r'D:\download\Data\Bert Model\chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = r'D:\download\Data\Bert Model\chinese_L-12_H-768_A-12/vocab.txt'
tokenizer = Tokenizer(dict_path)
class data_generator(DataGenerator):
"""
Data generator
"""
def __iter__(self, random=False):
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
for is_end, (text, label) in self.sample(random):
token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
batch_labels.append([label])
if len(batch_token_ids) == self.batch_size or is_end:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
batch_labels = sequence_padding(batch_labels)
yield [batch_token_ids, batch_segment_ids], batch_labels
batch_token_ids, batch_segment_ids, batch_labels = [], [], []
def train():
# Load the datasets
train_data = load_data('./data/train.csv')
test_data = load_data('./data/test.csv')
# Wrap the datasets in data generators
train_generator = data_generator(train_data, batch_size)
test_generator = data_generator(test_data, batch_size)
model = build_bert_model(config_path, checkpoint_path, class_nums)
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=Adam(5e-6),
metrics=['accuracy']
)
earlystop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
verbose=2,
mode='min'
)
# Save the best model
best_model_filepath = 'best_model_weights'
checkpoint = keras.callbacks.ModelCheckpoint(best_model_filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode = 'min')
model.fit_generator(train_generator.forfit(),
steps_per_epoch=len(train_generator),
epochs=10,
validation_data=test_generator.forfit(),
validation_steps=len(test_generator),
shuffle=True,
callbacks=[earlystop, checkpoint])
# Evaluation
model.load_weights('best_model_weights')
test_pred = []
test_true = []
for x, y in test_generator:
p = model.predict(x).argmax(axis=1)
test_pred.extend(p)
test_true = test_data[:, 1].tolist()
print(set(test_true))
print(set(test_pred))
target_names = [line.strip() for line in open('label', 'r', encoding='utf8')]
print(classification_report(test_true, test_pred, target_names=target_names))
if __name__ == '__main__':
train()
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype cave-specific abstract base classes (ABCs).**
'''
# ....................{ TODO }....................
#FIXME: Refactor this private submodule into a new public "beartype.caver"
#submodule, so-named as it enables users to externally create new ad-hoc
#protocols implementing structural subtyping resembling those predefined by
#"beartype.cave". To do so:
#
#* In the "beartype.caver" submodule:
# * Define a new make_type_structural() function with signature resembling:
# def make_type_structural(name: str, method_names: Iterable) -> type:
# * Implement this function to dynamically create a new type with the passed
# classname defining:
# * Abstract methods with the passed method names.
# * A __subclasshook__() dunder method checking the passed class for
# concrete methods with these names.
# To do so, note that abstract methods *CANNOT* be dynamically
# monkey-patched in after class creation but *MUST* instead be statically
# defined at class creation time (due to metaclass shenanigans).
# Fortunately, doing so is trivial; simply use the three-argument form of
# the type() constructor, as demonstrated by this StackOverflow answer:
# https://stackoverflow.com/a/14219244/2809027
# * *WAIT!* There's no need to call the type() constructor directly. Instead,
# define a new make_type() function in this new submodule copied from the
# betse.util.type.classes.define_class() function (but renamed, obviously).
#* Replace the current manual definition of "_BoolType" below with an in-place
# call to that method from the "beartype.cave" submodule: e.g.,
# BoolType = _make_type_structural(
# name='BoolType', method_names=('__bool__',))
#
#Dis goin' be good.
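#For concreteness, a rough sketch of the factory described above (hypothetical;
#not part of the current API, and superseded by the next FIXME):
#
#    def make_type_structural(name: str, method_names: Iterable) -> type:
#        def __subclasshook__(cls, C):
#            return _check_methods(C, *method_names)
#        namespace = {'__slots__': ()}
#        for method_name in method_names:
#            namespace[method_name] = abstractmethod(lambda self: None)
#        namespace['__subclasshook__'] = classmethod(__subclasshook__)
#        return ABCMeta(name, (object,), namespace)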
#FIXME: Actually, don't do any of the above. That would simply be reinventing
#the wheel, as the "typing.Protocol" superclass already exists and is more than
#up to the task. In fact, once we drop support for Python < 3.7, we should:
#* Redefine the "_BoolType" class declared below in terms of the
# "typing.Protocol" superclass.
#* Shift the "_BoolType" class directly into the "beartype.cave" submodule.
#* Refactor away this entire submodule.
# ....................{ IMPORTS }....................
from abc import ABCMeta, abstractmethod
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ FUNCTIONS }....................
def _check_methods(C: type, *methods: str):
'''
Private utility function called by abstract base classes (ABCs)
implementing structural subtyping by detecting whether the passed class or
some superclass of that class defines all of the methods with the passed
method names.
For safety, this function has been duplicated as is from its eponymous
counterpart in the private stdlib :mod:`_collections_abc` module.
Parameters
----------
C : type
Class to be validated as defining these methods.
methods : Tuple[str, ...]
Tuple of the names of all methods to validate this class as defining.
Returns
----------
Either:
* ``True`` if this class defines all of these methods.
* ``NotImplemented`` if this class fails to define one or more of these
methods.
'''
mro = C.__mro__
for method in methods:
for B in mro:
if method in B.__dict__:
if B.__dict__[method] is None:
return NotImplemented
break
else:
return NotImplemented
return True
# ....................{ SUPERCLASSES }....................
class BoolType(object, metaclass=ABCMeta):
'''
Type of all **booleans** (i.e., objects defining the ``__bool__()`` dunder
method; objects reducible in boolean contexts like ``if`` conditionals to
either ``True`` or ``False``).
This type matches:
* **Builtin booleans** (i.e., instances of the standard :class:`bool` class
implemented in low-level C).
* **NumPy booleans** (i.e., instances of the :class:`numpy.bool_` class
implemented in low-level C and Fortran) if :mod:`numpy` is importable.
Usage
----------
Non-standard boolean types like NumPy booleans are typically *not*
interoperable with the standard :class:`bool` type. In particular,
it is typically *not* the case, for any variable ``my_bool`` of
non-standard boolean type and truthy value, that either ``my_bool is True``
or ``my_bool == True`` yield the desired results. Rather, such variables
should *always* be coerced into the standard :class:`bool` type before
being compared -- either:
* Implicitly (e.g., ``if my_bool: pass``).
* Explicitly (e.g., ``if bool(my_bool): pass``).
Caveats
----------
**There exists no abstract base class governing booleans in Python.**
Although various Python Enhancement Proposals (PEPs) were authored on the
subject, all were rejected as of this writing. Instead, this type trivially
implements an ad-hoc abstract base class (ABC) detecting objects satisfying
the boolean protocol via structural subtyping. Although no actual
real-world classes subclass this :mod:`beartype`-specific ABC, the
detection implemented by this ABC suffices to match *all* boolean types.
See Also
----------
:class:`beartype.cave.ContainerType`
Further details on structural subtyping.
'''
# ..................{ DUNDERS }..................
# This abstract base class (ABC) has been implemented ala standard
# container ABCs in the private stdlib "_collections_abc" module (e.g., the
# trivial "_collections_abc.Sized" type).
__slots__ = ()
@abstractmethod
def __bool__(self):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is BoolType:
return _check_methods(C, '__bool__')
return NotImplemented
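# Illustrative behaviour of the hook above (doctest-style, not executed here):
#
#     >>> isinstance(True, BoolType)
#     True
#     >>> class Truthy:
#     ...     def __bool__(self): return True
#     >>> isinstance(Truthy(), BoolType)
#     True
#     >>> class NoBool: pass
#     >>> isinstance(NoBool(), BoolType)
#     False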
|
import abc
import os
import six
if six.PY3:
import io
file_types = (io.TextIOWrapper,)
else:
import types
file_types = (types.FileType,)
class Patch(object):
def __init__(self, file, new_suffix=".new", backup_suffix=".old", read_mode="r", write_mode="w", auto_begin=True):
if not isinstance(file, six.string_types + file_types):
raise TypeError("file must be string or file object")
self.file = file
self.new_suffix = new_suffix
self.backup_suffix = backup_suffix
self.read_mode = read_mode
self.write_mode = write_mode
if auto_begin:
self.begin()
def begin(self):
if isinstance(self.file, six.string_types):
self.infile = open(self.file, self.read_mode)
elif isinstance(self.file, file_types):
self.infile = self.file
self.write_mode = self.file.mode.replace("r", "w")
else:
raise TypeError("file must be string or file object")
if self.backup_suffix:
self.backupfile_name = self.infile.name+self.backup_suffix
if os.path.isfile(self.backupfile_name):
os.rename(self.backupfile_name, self.infile.name)
newfile_name = self.infile.name + self.new_suffix
self.newfile = open(newfile_name, self.write_mode)
def __enter__(self):
return self
def __iter__(self):
return self.infile.__iter__()
def read(self, n=None):
if n is None:
return self.infile.read()
else:
return self.infile.read(n)
def readline(self):
return self.infile.readline()
def readlines(self):
return self.infile.readlines()
def write(self, data):
return self.newfile.write(data)
def writelines(self, lines):
return self.newfile.writelines(lines)
def _close_all(self):
try:
self.infile.close()
except IOError:
pass
try:
self.newfile.close()
except IOError:
pass
def close(self):
self.commit()
def commit(self):
self._close_all()
# if backup is set, make a backup
if self.backup_suffix:
os.rename(self.infile.name, self.backupfile_name)
# move the new file to the orig
# this should be atomic on POSIX
os.rename(self.newfile.name, self.infile.name)
def rollback(self):
self._close_all()
os.unlink(self.newfile.name)
def __exit__(self, exc_type, exc_value, traceback):
if exc_type: # exception was raised, ignore the new file
self.rollback()
else: # no exception, commit
self.commit()
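# Hypothetical usage sketch (file name is illustrative, not part of this module):
#
#     with Patch("config.txt") as patch:
#         for line in patch:
#             patch.write(line.replace("DEBUG=1", "DEBUG=0"))
#
# On a clean exit the rewritten file atomically replaces "config.txt" and the
# original is kept as "config.txt.old"; if the block raises, the partial
# "config.txt.new" is discarded and the original file is left untouched.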
|
import numpy as np
import params
from CommClient import CommClient
from TFTrainer import TFTrainer as TR
def compute_norm(data):
return sum([np.sum(item**2) for item in data])
"""
@brief: convert a polar coordinate to a Euclidean coordinate
@param [polar_coordinate]: the polar coordinate to convert | coordinates are plain Python lists
@return: the conversion result (a Euclidean coordinate)
"""
def polar2euclid(polar_coordinate):
return [polar_coordinate[0] * np.math.cos(polar_coordinate[1]), polar_coordinate[0] * np.math.sin(polar_coordinate[1])]
"""
@brief: convert a Euclidean coordinate to a polar coordinate
@param [euclid_coordinate]: the Euclidean coordinate to convert | coordinates are plain Python lists
@return: the conversion result (a polar coordinate)
"""
def euclid2polar(euclid_coordinate):
return [np.math.sqrt(euclid_coordinate[0]**2 + euclid_coordinate[1]**2), np.math.atan2(euclid_coordinate[1], euclid_coordinate[0])]
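# Doctest-style illustration of the two converters above (not part of the
# original file):
#     >>> polar2euclid([1, 0])
#     [1.0, 0.0]
#     >>> euclid2polar([0.0, 1.0])
#     [1.0, 1.5707963267948966]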
class Client():
def __init__(self) -> None:
pass
def run(self, data, label, p_d):
self.__comm = CommClient('127.0.0.1', 12345)
self.__trainer = TR()
self.__polar_position = p_d[0]
self.__polar_direction = p_d[1]
self.__euclid_position = polar2euclid(self.__polar_position)
self.__euclid_direction = polar2euclid(self.__polar_direction)
self.__hi = self.__polar_position[0]**(-params.PATHLOSS_FACTOR)
self.__transmit_power = params.CLIENT_TRANSMIT_POWER
for _ in range(params.ITERATION_NUM):
# Receive the global model from the server
global_model = self.__comm.recv()
# Compute the gradient
grad = self.__trainer.compute_gradient(global_model, data, label)
# Compute the squared L2 norm of the gradient
grad_norm = compute_norm(grad)
# Send the results to the server
self.__comm.send({'grad_norm': grad_norm, 'received_power': self.__hi * self.__transmit_power, 'position': self.__euclid_position})
# Receive the scheduling decision from the server: 1 = scheduled, 0 = not scheduled
sche_sig = self.__comm.recv()
if sche_sig == 1:
# If scheduled, update the model with the gradient to obtain the local model
self.__trainer.train_with_grad(grad)
# Send the local model to the server
self.__comm.send(self.__trainer.get_weights())
self.__update_user()
def __update_user(self):
self.__move(1)
self.__hi = self.__polar_position[0]**(-params.PATHLOSS_FACTOR)
def __move(self, time_elapsed):
distance = self.__polar_direction[0] * time_elapsed
pose_d = polar2euclid([distance, self.__polar_direction[1]])
self.__euclid_position[0] += pose_d[0]
self.__euclid_position[1] += pose_d[1]
self.__polar_position = euclid2polar(self.__euclid_position)
if self.__polar_position[0] > 100:
normal_dir = polar2euclid([1, self.__polar_position[1]])
dot_product = self.__euclid_direction[0] * normal_dir[0] + self.__euclid_direction[1] * normal_dir[1]
polar_rho_vec = [dot_product, self.__polar_position[1]]
euclid_rho_vec = polar2euclid(polar_rho_vec)
euclid_side_vec = [self.__euclid_direction[0] - euclid_rho_vec[0], self.__euclid_direction[1] - euclid_rho_vec[1]]
self.__euclid_direction[0], self.__euclid_direction[1] = euclid_side_vec[0] - euclid_rho_vec[0], euclid_side_vec[1] - euclid_rho_vec[1]
self.__polar_direction = euclid2polar(self.__euclid_direction)
if __name__ == '__main__':
client = Client()
client.run()
|
def get_fasta(input, output):
with open(output, 'w') as op:
with open(input, 'r') as fp:
for (i, line) in enumerate(fp):
if i > 0:
now = line.split(',')
now_str = ">" + now[1] + "\n" + now[2] + "\n"
op.write(now_str)
# get_fasta('./data/Malonylation/Human/HM_encoded.csv', './data/Malonylation/Human/HM.fasta')
# get_fasta('./data/Malonylation/Mice/MM_encoded.csv', './data/Malonylation/Mice/MM.fasta')
|
import itertools
import collections
import binascii
from moneywagon.services import get_service
from moneywagon.crypto_data import crypto_data
from moneywagon import ALL_SERVICES
class CurrencySupport(object):
support_categories = {
'address': ['address_form', 'private_key_form'],
'transaction': [ 'transaction_form', 'transaction_hash_algo', 'script_hash_algo'],
'block': ['header_hash_algo'],
}
project_attributes = {
'altcore': {
'address_form': ('sha256-check', 'groestl-check'),
'private_key_form': ('btc', ),
'transaction_form': ('btc', ),
'transaction_hash_algo': ('double-sha256', 'single-sha256'),
'script_hash_algo': ('double-sha256', 'groestl'),
'header_hash_algo': ('double-sha256', 'groestl')
},
'moneywagon': {
'address_form': ('sha256-check', ),
'private_key_form': ('btc', ),
'transaction_form': ('btc', ),
'transaction_hash_algo': ('double-sha256', ),
'script_hash_algo': ('double-sha256', ),
'header_hash_algo': ('double-sha256', )
},
}
def __init__(self, verbose=False):
self.verbose = verbose
@property
def sorted_crypto_data(self):
return sorted(crypto_data.items(), key=lambda x: x[1]['name'])
def is_all_supported(self, crypto_data_item, project, level):
if level == 'full':
fields_to_check = list(itertools.chain.from_iterable(self.support_categories.values()))
else:
fields_to_check = self.support_categories[level]
for field, supported in self.project_attributes[project].items():
value = crypto_data_item.get(field)
if field not in fields_to_check:
continue
if value is not None and value not in supported:
return False
return True
def supported_currencies(self, project='moneywagon', level="full"):
"""
Returns a list of all currencies that are supported by the passed-in project
and support level. Support level can be: "block", "transaction", "address"
or "full".
"""
ret = []
if project == 'multiexplorer-wallet':
for currency, data in self.sorted_crypto_data:
if not data.get("bip44_coin_type"):
continue
if len(data.get('services', {}).get("push_tx", [])) < 1:
continue
if len(data.get('services', {}).get("historical_transactions", [])) < 1:
continue
if len(data.get('services', {}).get("single_transaction", [])) < 1:
continue
if len(data.get('services', {}).get("unspent_outputs", [])) < 1:
continue
ret.append(currency)
altcore_tx = self.supported_currencies('altcore', level="transaction")
return [x for x in ret if x in altcore_tx]
for symbol, data in self.sorted_crypto_data:
if symbol == '': # template
continue
if self.is_all_supported(data, project, level):
ret.append(symbol)
ret.append('bch')
return ret
def not_supported_currencies(self, project='moneywagon', level="full"):
"""
Returns a list of all currencies that are defined in moneywagon but not
supported by the passed-in project and support level. Support level can
be: "block", "transaction", "address" or "full".
"""
supported = self.supported_currencies(project, level)
ret = []
for symbol, data in self.sorted_crypto_data:
if symbol == '': # template
continue
if symbol not in supported:
ret.append(symbol)
return ret
def altcore_data(self):
"""
Returns the crypto_data for all currencies defined in moneywagon that also
meet the minimum support for altcore. Data is keyed according to the
bitcore specification.
"""
ret = []
for symbol in self.supported_currencies(project='altcore', level="address"):
data = crypto_data[symbol]
priv = data.get('private_key_prefix')
pub = data.get('address_version_byte')
hha = data.get('header_hash_algo')
shb = data.get('script_hash_byte')
supported = collections.OrderedDict()
supported['name'] = data['name']
supported['alias'] = symbol
if pub is not None:
supported['pubkeyhash'] = int(pub)
if priv:
supported['privatekey'] = priv
supported['scripthash'] = shb if shb else 5
if 'transaction_form' in data:
supported['transactionForm'] = data['transaction_form']
if 'private_key_form' in data:
supported['privateKeyForm'] = data['private_key_form']
#if 'message_magic' in data and data['message_magic']:
# supported['networkMagic'] = '0x%s' % binascii.hexlify(data['message_magic'])
supported['port'] = data.get('port') or None
if hha not in (None, 'double-sha256'):
supported['headerHashAlgo'] = hha
if data.get('script_hash_algo', 'double-sha256') not in (None, 'double-sha256'):
supported['scriptHashAlgo'] = data['script_hash_algo']
if data.get('transaction_hash_algo', 'double-sha256') not in (None, 'double-sha256'):
supported['transactionHashAlgo'] = data['transaction_hash_algo']
if data.get('seed_nodes'):
supported['dnsSeeds'] = data['seed_nodes']
ret.append(supported)
return ret
def service_support(method=None, service=None, timeout=0.1, verbose=False):
possible_args = {
'get_current_price': ['btc', 'usd'],
'get_balance': ['btc', '123445'],
'get_orderbook': ['btc', 'usd'],
'push_tx': ['btc', '23984729847298'],
'make_order': ['btc', 'usd', 9999999999, 99999999, "sell"],
'get_exchange_balance': ['btc'],
'get_deposit_address': ['btc'],
'initiate_withdraw': ['btc', 999999999999, '123456'],
}
matched = []
def determine_support(s, method):
try:
getattr(s, method)(*possible_args[method])
except NotImplementedError:
if verbose:
print("not implemented", s.name)
return False
except Exception as exc:
if verbose:
print ("implemented", s.name, exc, str(exc))
return True
return True
if method:
for Service in ALL_SERVICES:
s = Service(timeout=timeout)
if determine_support(s, method):
matched.append(s.name)
elif service:
s = get_service(name=service)(timeout=timeout)
for method in possible_args:
if determine_support(s, method):
matched.append(method)
return matched
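# Hypothetical usage (not part of the original module):
#
#     cs = CurrencySupport()
#     cs.supported_currencies(project='altcore', level='transaction')
#     cs.altcore_data()
#
# service_support(method='get_current_price') probes every registered service
# with throwaway arguments and returns the names of those whose call does not
# raise NotImplementedError (i.e. the method appears to be implemented).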
|
import json
import sys
assert sys.argv[1].endswith(".jsonl")
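# Usage sketch (script name is hypothetical): `python jsonl_to_json.py data.jsonl`
# reads one JSON object per line from data.jsonl and writes them out as a single
# JSON array to data.json.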
data = []
with open(sys.argv[1]) as f:
lines = f.read().split("\n")
for line in lines:
if line:
data.append(json.loads(line))
with open(sys.argv[1].replace(".jsonl", ".json"), "w") as f:
json.dump(data, f)
|
"""This module contains the general information for FirmwareUpgradeDetail ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class FirmwareUpgradeDetailConsts():
CATEGORY_CATALOG = "catalog"
CATEGORY_CONFIG = "config"
CATEGORY_DATA_LOAD = "data-load"
CATEGORY_FAULTS = "faults"
CATEGORY_OTHER = "other"
CATEGORY_SERVER_REBOOT = "server-reboot"
SEVERITY_ERROR = "error"
SEVERITY_FATAL = "fatal"
SEVERITY_INFO = "info"
SEVERITY_UNKNOWN = "unknown"
SEVERITY_WARN = "warn"
class FirmwareUpgradeDetail(ManagedObject):
"""This is FirmwareUpgradeDetail class."""
consts = FirmwareUpgradeDetailConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("FirmwareUpgradeDetail", "firmwareUpgradeDetail", "id-[id]", VersionMeta.Version141a, "InputOutput", 0x1f, [], ["admin"], [u'firmwareUpgradeInfo'], [], ["Get"])
prop_meta = {
"category": MoPropertyMeta("category", "category", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["catalog", "config", "data-load", "faults", "other", "server-reboot"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version141a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"severity": MoPropertyMeta("severity", "severity", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["error", "fatal", "info", "unknown", "warn"], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"category": "category",
"childAction": "child_action",
"description": "description",
"dn": "dn",
"id": "id",
"rn": "rn",
"severity": "severity",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.category = None
self.child_action = None
self.description = None
self.severity = None
self.status = None
ManagedObject.__init__(self, "FirmwareUpgradeDetail", parent_mo_or_dn, **kwargs)
|
#!/usr/bin/env python3
import omni_const
import omni_config
import omni_unpwd
import sys
import nmap
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed
from multiprocessing import cpu_count
from easysnmp import Session
from omnissiah.db import OmniDB
from omnissiah.omnissiah import OmniProgram
from omnissiah.msg import msg_loaded_records, msg_db_added_records, msg_scan_snmp_oid_walk
from omnissiah.const import snmp_community_infoid
from omnissiah.util import split_dict, split_list, hex_from_octets
select_missed_communities_sql = "SELECT raw_scan_ip.ipid, raw_scan_script.value FROM raw_scan_ip INNER JOIN raw_scan_port ON raw_scan_ip.ipid=raw_scan_port.ipid \
INNER JOIN raw_scan_script ON raw_scan_port.portid=raw_scan_script.portid WHERE raw_scan_script.script='snmp-brute' AND \
raw_scan_ip.ipid NOT IN (SELECT ipid FROM raw_scan_ip_info WHERE infoid=1) AND raw_scan_script.value LIKE '% - Valid credentials%';"
insert_scan_infos_sql = 'INSERT INTO raw_scan_ip_info(ipid, infoid, value) VALUES (%s, %s, %s)';
select_snmp_communities_sql = "SELECT raw_scan_ip.ip, raw_scan_ip_info.value, raw_scan_ip.ipid, ref_ipprefix.siteid FROM raw_scan_ip \
INNER JOIN raw_scan_ip_info ON raw_scan_ip.ipid=raw_scan_ip_info.ipid \
INNER JOIN ref_scan_ip_info ON raw_scan_ip_info.infoid=ref_scan_ip_info.infoid \
LEFT JOIN ref_ipprefix ON raw_scan_ip.refid=ref_ipprefix.ipprefixid AND raw_scan_ip.sourceid=1 \
WHERE ref_scan_ip_info.info='snmp_community';"
select_ping_ipprefix_sql = 'SELECT DISTINCT ref_ipprefix.siteid, ref_ipprefix.startip, ref_ipprefix.netnum FROM raw_scan_ip \
LEFT JOIN ref_ipprefix ON raw_scan_ip.refid=ref_ipprefix.ipprefixid WHERE raw_scan_ip.sourceid=1 ORDER BY ref_ipprefix.siteid;'
select_oids_sql = 'SELECT oidid, name, oid, command, prescan FROM ref_scan_snmp_oid;'
insert_snmp_sql = 'INSERT INTO raw_snmp(ipid, oidid, oid, snmp_type, value, value_hex, vlan) VALUES (%s, %s, %s, %s, %s, %s, %s)';
select_vlan_oid_sql = "SELECT raw_scan_ip.ip, raw_snmp.oid FROM raw_snmp \
INNER JOIN ref_scan_snmp_oid ON raw_snmp.oidid=ref_scan_snmp_oid.oidid \
INNER JOIN raw_scan_ip ON raw_snmp.ipid=raw_scan_ip.ipid \
WHERE ref_scan_snmp_oid.name='vtpVlanState' AND raw_snmp.value='1' \
ORDER BY raw_scan_ip.ip;"
def add_missed_communities(db, log):
cur = db.cursor()
cur.execute(select_missed_communities_sql)
vallist = []
for r in cur.fetchall():
for s in r[1].split('\n'):
if 'valid credentials' in s.lower():
vallist.append((r[0], snmp_community_infoid, s.split('-')[0].strip()))
break
if vallist:
cur.executemany(insert_scan_infos_sql, vallist)
db.commit()
cur.close()
def select_snmp_ips(db, log):
result = {}
cur = db.cursor()
cur.execute(select_snmp_communities_sql)
for r in cur.fetchall():
result[r[0]] = {'ip':r[0], 'community':r[1], 'ipid':r[2], 'siteid':r[3], 'ping':[], 'vlan':None}
sites = {}
cur.execute(select_ping_ipprefix_sql)
for r in cur.fetchall():
if r[0] not in sites:
sites[r[0]] = []
sites[r[0]].append({'startip':r[1], 'netnum':r[2]})
for ip, vsnmp in result.items():
if vsnmp['siteid'] in sites:
for prefix in sites[vsnmp['siteid']]:
hosts = prefix['startip'] + '/' + str(prefix['netnum'])
timeout = omni_config.snmp_ping_timeout if prefix['netnum']>=24 else \
omni_config.snmp_ping_timeout*pow(2, 24-prefix['netnum'])
vsnmp['ping'].append({'hosts':hosts, 'timeout':timeout})
log.info(msg_loaded_records.format('SNMP hosts', len(result)))
return result
def select_oids(db, log):
result = {}
cur = db.cursor()
cur.execute(select_oids_sql)
for r in cur.fetchall():
result[r[0]] = {'oidid':r[0], 'name':r[1], 'oid':r[2], 'command':r[3], 'prescan':(True if r[4] else False)}
cur.close()
return result
def single_host_oid_walk(ip, oid, community, timeout, retries, log, bulk=True, pings=[], vlan=None):
try:
result = None
if oid['prescan']:
nmapscan = nmap.PortScanner()
for ping in pings:
try:
nmapscan.scan(hosts=ping['hosts'], arguments=omni_config.snmp_ping_scan, sudo=True,
timeout=ping['timeout'])
except:
log.exception('Fatal error')
session = Session(hostname=ip, community=community, version=2, timeout=timeout, retries=retries)
if oid['command']=='walk':
if bulk:
result = session.bulkwalk(oid['oid'])
else:
result = session.walk(oid['oid'])
elif oid['command']=='get':
result = session.get(oid['oid'])
if not isinstance(result, list):
result = [result]
result = {'ip':ip, 'oidid':oid['oidid'], 'vlan':vlan, 'snmp':result}
except:
# log.exception('Fatal error')
result = None
return result
def single_process_snmp_oid_walk(ips, oid, timeout, retries, threadsnum, log):
results = []
with ThreadPoolExecutor(max_workers=threadsnum) as executor:
futures = []
for ipinfo in ips:
futures.append(executor.submit(single_host_oid_walk, ip=ipinfo['ip'], oid=oid, community=ipinfo['community'],
timeout=timeout, retries=retries, log=log, bulk=True, pings=ipinfo['ping'], vlan=ipinfo['vlan']))
for future in as_completed(futures):
result = future.result()
if result is not None:
results.append(result)
executor.shutdown(wait=False, cancel_futures=True)
return results
def snmp_walk(ips, oids, cpusnum, log):
walks = []
if isinstance(ips, dict):
pools = split_list(list(ips.values()), cpusnum)
else:
pools = split_list(ips, cpusnum)
jobs = []
for oidid, oid in oids.items():
for pool in pools:
jobs.append({'ips':pool, 'oid':oid, 'timeout':omni_config.snmp_timeout, 'retries':omni_config.snmp_retries,
'threadsnum':omni_config.snmp_threads, 'log':log})
with ProcessPoolExecutor(max_workers=cpusnum) as executor:
futures = []
for job in jobs:
futures.append(executor.submit(single_process_snmp_oid_walk, ips=job['ips'], oid=job['oid'], timeout=job['timeout'],
retries=job['retries'], threadsnum=job['threadsnum'], log=job['log']))
for future in as_completed(futures):
result = future.result()
walks.extend(result)
executor.shutdown(wait=False, cancel_futures=True)
return walks
def save_walks(db, ips, oids, walks, log):
cur = db.cursor()
vallist = []
for walk in walks:
for snmp in walk['snmp']:
value = str(snmp.value)
value_hex = hex_from_octets(snmp.value)
oid = snmp.oid + '.' + snmp.oid_index if snmp.oid_index else snmp.oid
value = value.replace('\x00', '')
value = value[:omni_config.snmp_max_value_len]
value_hex = value_hex[:omni_config.snmp_max_value_len*2]
if value:
vallist.append((ips[walk['ip']]['ipid'], walk['oidid'], oid, snmp.snmp_type, value, value_hex, walk['vlan']))
if vallist:
cur.executemany(insert_snmp_sql, vallist)
db.commit()
log.info(msg_db_added_records.format('raw_snmp', len(vallist)))
def select_vlan_ips(db, ips, log):
result = []
cur = db.cursor()
cur.execute(select_vlan_oid_sql)
for r in cur.fetchall():
record = ips[r[0]].copy()
record['vlan'] = int(r[1].split('.')[-1])
record['community'] = record['community'] + '@' + str(record['vlan'])
result.append(record)
log.info(msg_loaded_records.format('SNMP hosts and vlans', len(result)))
return result
def main():
try:
exitcode = 1
program = OmniProgram(omni_config.log_path, omni_config.log_level, omni_config.log_format, omni_config.log_date_format)
omnidb = OmniDB(omni_config.dbtype, omni_config.dbhost, omni_config.dbname,
omni_unpwd.db_raw_user, omni_unpwd.db_raw_password, log=program.log, program=program.name, ssl=omni_config.dbssl)
omnidb.run_program_queries(stage=1)
add_missed_communities(omnidb, program.log)
ips = select_snmp_ips(omnidb, program.log)
oids = select_oids(omnidb, program.log)
omnidb.close()
cpusnum = omni_config.scan_processes_num if omni_config.scan_processes_num else cpu_count() // 2 - 1
cpusnum = cpusnum if cpusnum>0 else 1
walks = snmp_walk(ips, oids, cpusnum, program.log)
omnidb = OmniDB(omni_config.dbtype, omni_config.dbhost, omni_config.dbname,
omni_unpwd.db_raw_user, omni_unpwd.db_raw_password, log=program.log, program=program.name, ssl=omni_config.dbssl)
save_walks(omnidb, ips, oids, walks, program.log)
omnidb.run_program_queries(stage=2)
ips_vlan = select_vlan_ips(omnidb, ips, program.log)
omnidb.close()
oids_vlan = {k:v for k,v in oids.items() if v['name'] in [omni_const.oid_macaddrtable_name, omni_const.oid_macporttable_name]}
if oids_vlan:
walks = snmp_walk(ips_vlan, oids_vlan, cpusnum, program.log)
else:
walks = None
omnidb = OmniDB(omni_config.dbtype, omni_config.dbhost, omni_config.dbname,
omni_unpwd.db_raw_user, omni_unpwd.db_raw_password, log=program.log, program=program.name, ssl=omni_config.dbssl)
if walks:
save_walks(omnidb, ips, oids, walks, program.log)
omnidb.run_program_queries(stage=3)
omnidb.close()
exitcode = 0
except:
program.log.exception('Fatal error')
finally:
return exitcode
if __name__ == "__main__":
sys.exit(main())
|
import argparse
from collections import defaultdict
from format import bogich
from classifier import svm_standard, svm_probability, sklearn_svm, naive_bayes, decision_tree, stochastic_gradient_descent, random_forest, gradient_boosting, extra_trees, gmm, k_means, ward, sklearn_novelty
FORMATS = {
'bogich': bogich,
}
CLASSIFIERS = {
'svm_standard': svm_standard,
'svm_probability': svm_probability,
'sklearn_svm': sklearn_svm,
'naive_bayes': naive_bayes,
'decision_tree': decision_tree,
'stochastic_gradient_descent': stochastic_gradient_descent,
'random_forest': random_forest,
'gradient_boosting': gradient_boosting,
'extra_trees': extra_trees,
'gmm': gmm,
'k_means': k_means,
'ward': ward,
'sklearn_novelty': sklearn_novelty,
}
def cross_validate(data, classifier):
correct = defaultdict(int)
wrong = defaultdict(int)
for i in range(0, len(data)):
train = []
test = []
for j, item in enumerate(data):
if i != j:
train.append(item)
else:
test.append(item)
prediction = classifier.classify(train, test)[0][0]
actual = test[0]['attr']['Disease']
if prediction == actual:
correct[actual] += 1
else:
print "Misclassified %s as %s in row %d" % (actual, prediction, i)
wrong[actual] += 1
total_correct = 0
total_wrong = 0
for label in set(correct.keys() + wrong.keys()):
print "%s identified %d/%d times (%d percent)" % (label, correct[label], correct[label] + wrong[label], int (round (float (correct[label]) / float (correct[label] + wrong[label]) * 100.00)))
total_correct += correct[label]
total_wrong += wrong[label]
print ''
print "Total Correct: %d/%d" % (total_correct, total_correct + total_wrong)
def test(train, test, classifier):
correct = defaultdict(int)
wrong = defaultdict(int)
(predictions, probabilities) = classifier.classify(train, test)
for i in range(0, len(test)):
actual = test[i]['attr']['Disease']
if actual:
if actual == predictions[i]:
correct[actual] += 1
else:
print "Misclassified %s as %s" % (actual, predictions[i])
wrong[actual] += 1
total_correct = 0
total_wrong = 0
for label in set(correct.keys() + wrong.keys()):
print "%s identified %d/%d times (%d percent)" % (label, correct[label], correct[label] + wrong[label], int (round (float (correct[label]) / float (correct[label] + wrong[label]) * 100.00)))
total_correct += correct[label]
total_wrong += wrong[label]
print ''
print "Total Correct: %d/%d" % (total_correct, total_correct + total_wrong)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train classifiers and run them on test data.')
parser.add_argument('-training', help="Training data file", default="ProMED_master_clean.csv")
parser.add_argument('-training_format', help="Format of training data", choices=FORMATS.keys(), default="bogich")
parser.add_argument('-classifier', help="Classifier", choices=CLASSIFIERS.keys(), default='svm_standard')
parser.add_argument('-cross_validate', help="Whether to run cross validation", default=False)
parser.add_argument('-test', help="Test data file", default=None)
parser.add_argument('-test_format', help="Format of test data", choices=FORMATS.keys(), default="bogich")
args = parser.parse_args()
training_format = FORMATS[args.training_format]
training_data = training_format.read(args.training)
classifier = CLASSIFIERS[args.classifier]
if args.cross_validate:
print "Running cross validation"
cross_validate(training_data, classifier)
if args.test:
print "Testing"
test_format = FORMATS[args.test_format]
test_data = test_format.read(args.test)
test(training_data, test_data, classifier)
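# Example invocations (script name and test file name are hypothetical):
#     python classify.py -classifier random_forest -cross_validate True
#     python classify.py -classifier svm_probability -test new_reports.csv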
|
# Author: Runqing Xu, Bohua Zhan
"""API for computing integrals."""
import json
from flask import request
from flask.json import jsonify
from lark import Lark, Transformer, v_args, exceptions
from fractions import Fraction
from sympy import expand_multinomial
import pathlib
import os
import integral
from logic import basic
from integral import slagle
from integral import proof
from app.app import app
basic.load_theory('interval_arith')
@app.route("/api/integral-load-file-list", methods=['POST'])
def integral_load_file_list():
os.chdir('./integral/examples')
json_files = tuple(str(z) for z in list(pathlib.Path('./').rglob('*.json')))
os.chdir('../../')
return jsonify({
'file_list': json_files
})
@app.route("/api/integral-open-file", methods=['POST'])
def integral_open_file():
data = json.loads(request.get_data().decode('utf-8'))
file_name = "integral/examples/%s" % data['filename']
with open(file_name, 'r', encoding='utf-8') as f:
f_data = json.load(f)
for item in f_data['content']:
problem = integral.parser.parse_expr(item['problem'])
item['_problem_latex'] = integral.latex.convert_expr(problem)
return jsonify(f_data)
@app.route("/api/integral-initialize", methods=['POST'])
def integral_initialize():
data = json.loads(request.get_data().decode('utf-8'))
problem = integral.parser.parse_expr(data['problem'])
return jsonify({
'text': str(problem),
'latex': integral.latex.convert_expr(problem),
'reason': "Initial"
})
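# The endpoint above expects a JSON body of the form {"problem": "<expression in
# the integral parser's syntax>"} and answers with the parsed expression's string
# form, its LaTeX rendering, and the reason "Initial".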
@app.route("/api/integral-validate-integral", methods=['POST'])
def integral_validate_integral():
data = json.loads(request.get_data().decode('utf-8'))
try:
problem = integral.parser.parse_expr(data['expr'])
index = int(data['index'])
return jsonify({
'flag': True,
'content': {
'name': 'Exercise ' + str(data['index']),
'problem': data['expr'],
'_problem_latex': integral.latex.convert_expr(problem),
}
})
except:
return jsonify({
'flag': False
})
@app.route("/api/integral-super-simplify", methods=['POST'])
def integral_super_simplify():
data = json.loads(request.get_data().decode('utf-8'))
rules_set = [integral.rules.Simplify(), integral.rules.OnSubterm(integral.rules.Linearity()), integral.rules.OnSubterm(integral.rules.CommonIntegral())]
# abs_rule = integral.rules.ElimAbs()
problem = integral.parser.parse_expr(data['problem'])
# if not (abs_rule.check_zero_point(problem) and len(problem.getAbs()) == 0):
# # If there are no abs expression or there are no zero point
# rules_set.append(integral.rules.OnSubterm(integral.rules.ElimAbs()))
def simplify(problem):
for i in range(5):
for r in rules_set:
problem = r.eval(problem)
if problem.is_constant():
return problem
return problem
problem = simplify(integral.parser.parse_expr(data['problem']))
step = {
'text': str(problem),
'latex': integral.latex.convert_expr(problem),
'reason': "Simplification",
}
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'])
return jsonify(step)
@app.route("/api/integral-elim-abs", methods=["POST"])
def integral_elim_abs():
data = json.loads(request.get_data().decode('utf-8'))
rule = integral.rules.ElimAbs()
problem = integral.parser.parse_expr(data['problem'])
if not rule.check_zero_point(problem):
new_problem = rule.eval(problem)
step = {
'reason': "Elim abs",
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'location': data['location']
}
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'])
return jsonify(step)
c = rule.get_zero_point(problem)
new_problem = rule.eval(problem)
step = {
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'reason': "Elim abs",
'params': {
'c': str(c)
},
'location': data['location']
}
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'])
return jsonify(step)
@app.route("/api/integral-integrate-by-equation", methods=['POST'])
def integrate_by_equation():
data = json.loads(request.get_data().decode('utf-8'))
rhs = integral.parser.parse_expr(data['rhs'])
lhs = integral.parser.parse_expr(data['lhs'])
rule = integral.rules.IntegrateByEquation(lhs)
if not rule.validate(rhs):
return jsonify({
'flag': False
})
new_problem = rule.eval(rhs)
coeff = rule.coeff
return jsonify({
"text": str(new_problem),
"latex": integral.latex.convert_expr(new_problem),
"params": {
"factor": str(coeff),
"prev_id": str(int(data['prev_id']) - 1)
},
"reason": "Solve equation",
"_latex_reason": "By solving equation: \\(%s = %s\\)" % (
integral.latex.convert_expr(lhs), integral.latex.convert_expr(rhs)
)
})
@app.route("/api/integral-separate-integrals", methods=['POST'])
def integral_separate_integrals():
data = json.loads(request.get_data().decode('utf-8'))
problem = integral.parser.parse_expr(data['problem'])
integrals = problem.separate_integral()
n = []
for i, loc in integrals:
n.append({
"text": str(i),
"var_name": i.var,
"body": str(i.body),
"latex": integral.latex.convert_expr(i),
"location": str(loc)
})
return json.dumps(n)
@app.route("/api/integral-compose-integral", methods=['POST'])
def integral_compose_integral():
data = json.loads(request.get_data().decode('utf-8'))
new_integral = []
latex_reason = ""
reason = ""
modified_index = int(data['index'])
location = ""
if 'location' in data['problem'][modified_index]:
location = data['problem'][modified_index]['location']
denom = ""
rhs = ""
params = {}
for d in data['problem']:
new_integral.append(integral.parser.parse_expr(d['text']))
if '_latex_reason' in d:
latex_reason += d['_latex_reason']
if 'reason' in d:
reason += d['reason']
if 'params' in d:
params = d['params']
if 'denom' in d:
denom = d['denom']
if 'rhs' in d:
rhs = d['rhs']
curr = integral.parser.parse_expr(data['cur_calc'])
new_expr = curr
old_integral = curr.separate_integral()
for i in range(len(old_integral)):
new_expr = new_expr.replace_trig(old_integral[i][0], new_integral[i])
info = {
'text': str(new_expr),
'latex': integral.latex.convert_expr(new_expr),
'reason': reason,
'checked': data['problem'][data['index']]['checked'],
'proof': data['problem'][data['index']]['proof']
}
if location != "":
info.update({'location': location})
if params:
info.update({'params': params})
if denom:
info.update({'denom': denom})
if rhs:
info.update({'rhs': rhs})
if latex_reason:
info.update({'_latex_reason': latex_reason})
return json.dumps(info)
@app.route("/api/integral-substitution", methods=['POST'])
def integral_substitution():
data = json.loads(request.get_data().decode('utf-8'))
try:
expr = integral.parser.parse_expr(data['expr'])
except:
return jsonify({
'flag': False,
'reason': "%s is not a valid substitution expression." % data['expr']
})
rule = integral.rules.Substitution1(data['var_name'], expr)
problem = integral.parser.parse_expr(data['problem'])
if data['var_name'] == problem.var:
return jsonify({
'flag': False,
'reason': "%s is not a valid variable for substitution." % data['var_name']
})
try:
new_problem = rule.eval(problem)
new_problem_body = str(rule.f)
except:
return jsonify({
'flag': False,
'reason': "Substitution failed."
})
log = {
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'reason': "Substitution",
'location': data['location'],
'params': {
'f': new_problem_body,
'g': str(expr),
'var_name': str(data['var_name'])
},
'_latex_reason': "Substitute \\(%s\\) for \\(%s\\)" % (
integral.latex.convert_expr(integral.parser.parse_expr(data['var_name'])), integral.latex.convert_expr(expr)
)
}
log['checked'], log['proof'] = proof.translate_single_item(log, data['problem'], _loc="")
return jsonify({
'flag': True,
'log': log
})
@app.route("/api/integral-substitution2", methods=['POST'])
def integral_substitution2():
data = json.loads(request.get_data().decode('utf-8'))
try:
expr = integral.parser.parse_expr(data['expr'])
except:
return jsonify({
'flag': False,
'reason': "%s is not a valid expression" % data['expr']
})
rule = integral.rules.Substitution2(data['var_name'], expr)
problem = integral.parser.parse_expr(data['problem'])
new_problem = rule.eval(problem)
log = {
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'reason': "Substitution inverse",
'location': data['location'],
'params': {
'g': str(expr),
'var_name': str(data['var_name']),
"a": str(new_problem.lower),
"b": str(new_problem.upper)
},
'_latex_reason': "Substitute \\(%s\\) for \\(%s\\)" % (
integral.latex.convert_expr(integral.parser.parse_expr(problem.var)), integral.latex.convert_expr(expr)
)
}
log['checked'], log['proof'] = proof.translate_single_item(log, data['problem'])
return jsonify({
'flag': True,
'log': log
})
@app.route("/api/integral-validate-expr", methods=['POST'])
def integral_validate_expr():
data = json.loads(request.get_data().decode('utf-8'))
problem = integral.parser.parse_expr(data['problem'])
flag = None # if dollar is valid, flag = true
try:
dollar = integral.parser.parse_expr(data['dollar'])
if dollar.normalize() != problem.body.normalize():
return jsonify({
'flag': False
})
else:
# Do trig transform
select = integral.parser.parse_expr(data['select'])
dollar_location = dollar.get_location()
location = ""
if data["integral_location"] != "":
location = data["integral_location"] + ".0"
else:
location = "0"
if dollar_location != "":
location += "." + dollar_location
# location = data["integral_location"] + ".0." + dollar_location if data["integral_location"] != "" else "0." + dollar_location
new_trig_set = tuple(integral.expr.trig_transform(select, problem.var))
new_integral_set = [
integral.expr.Integral(problem.var, problem.lower, problem.upper, problem.body.replace_expr(dollar_location, t[0]))
for t in new_trig_set]
transform_info = []
for i in range(len(new_integral_set)):
step = {
"reason": "Rewrite trigonometric",
'text': str(new_integral_set[i]),
'latex': integral.latex.convert_expr(new_integral_set[i]),
"params":{
"rule": new_trig_set[i][1]
},
'_latex_reason': "Rewrite trigonometric \\(%s\\) to \\(%s\\)" %
(integral.latex.convert_expr(select), integral.latex.convert_expr(new_trig_set[i][0])),
# If there is only one integral in the full expression, location begins from the body;
# Else from the integral
"location": location
}
if dollar_location == "":
rel_loc = "0"
else:
rel_loc = "0."+dollar_location
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'], _loc=rel_loc)
transform_info.append(step)
return jsonify({
"flag": True,
"content": transform_info
})
except (exceptions.UnexpectedCharacters, exceptions.UnexpectedToken) as e:
return jsonify({
'flag': False
})
@app.route("/api/integral-validate-power-expr", methods=['POST'])
def integral_validate_power_expr():
data = json.loads(request.get_data().decode('utf-8'))
problem = integral.parser.parse_expr(data['problem'])
flag = None # if dollar is valid, flag = true
try:
dollar = integral.parser.parse_expr(data['dollar'])
if dollar.normalize() != problem.body.normalize():
return jsonify({
'flag': False
})
else:
select = integral.parser.parse_expr(data['select'])
if not (select.ty == integral.expr.OP and select.op == "^" and select.args[1].ty == integral.expr.CONST and Fraction(select.args[1].val).denominator == 1):
return jsonify({
'flag': False
})
dollar_location = dollar.get_location()
location = ""
if data["integral_location"] != "":
location = data["integral_location"] + ".0"
else:
location = "0"
if dollar_location != "":
location += "." + dollar_location
body = problem.body
body = body.replace_expr(dollar_location, integral.rules.UnfoldPower().eval(select))
new_integral = integral.expr.Integral(problem.var, problem.lower, problem.upper, body)
step = {
"flag": True,
"text": str(new_integral),
"latex": integral.latex.convert_expr(new_integral),
"location": location,
"reason": "Unfold power"
}
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'])
return jsonify(step)
except (exceptions.UnexpectedCharacters, exceptions.UnexpectedToken) as e:
return jsonify({
'flag': False
})
@app.route("/api/integral-validate-rewrite", methods=['POST'])
def integral_validate_rewrite():
data = json.loads(request.get_data().decode('utf-8'))
problem = integral.parser.parse_expr(data['problem'])
flag = None # if dollar is valid, flag = true
try:
dollar = integral.parser.parse_expr(data['dollar'])
if dollar.normalize() != problem.body.normalize():
return jsonify({
'flag': False
})
else:
# Do trig transform
select = integral.parser.parse_expr(data['select'])
dollar_location = dollar.get_location()
location = ""
if data["integral_location"] != "":
location = data["integral_location"] + ".0"
else:
location = "0"
if dollar_location != "":
location += "." + dollar_location
return jsonify({
"rewrite": str(select),
"flag": True,
"absolute_location": location, #location in the whole Integral
"relative_location": dollar_location # location in its own integral
})
except (exceptions.UnexpectedCharacters, exceptions.UnexpectedToken) as e:
return jsonify({
'flag': False
})
@app.route("/api/integral-rewrite-expr", methods=['POST'])
def integral_rewrite_expr():
data = json.loads(request.get_data().decode('utf-8'))
problem = integral.parser.parse_expr(data['problem'])
old_expr = integral.parser.parse_expr(data['old_expr'])
try:
new_expr = integral.parser.parse_expr(data['new_expr'])
location = data['relative_location']
if expand_multinomial(integral.expr.sympy_style(new_expr.normalize()).simplify()) != expand_multinomial(integral.expr.sympy_style(old_expr.normalize()).simplify()) or new_expr.findVar()[0].name != problem.var:
return jsonify({
'flag': False
})
new_problem = integral.expr.Integral(problem.var, problem.lower, problem.upper, problem.body.replace_expr(location, new_expr))
if location == "":
rel_loc = "0"
else:
rel_loc = "0." + location
if old_expr.ty == integral.expr.OP and old_expr.op == "/" or\
old_expr.ty == integral.expr.OP and old_expr.op == "*" and\
old_expr.args[1].ty == integral.expr.OP and old_expr.args[1].op == "^" and\
old_expr.args[1].args[1] == integral.expr.Const(-1):
denom = old_expr.args[1]
step = {
'flag': True,
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'reason': "Rewrite",
'_latex_reason': "Rewrite \\(%s\\) to \\(%s\\)"%(integral.latex.convert_expr(old_expr),
integral.latex.convert_expr(new_expr)),
'params': {
'rhs': data['new_expr'],
'denom': str(denom)
},
"location": data['absolute_location']
}
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'], _loc=rel_loc)
return jsonify(step)
else:
step = {
'flag': True,
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'reason': "Rewrite",
'_latex_reason': "Rewrite \\(%s\\) to \\(%s\\)"%(integral.latex.convert_expr(old_expr),
integral.latex.convert_expr(new_expr)),
'params': {
'rhs': data['new_expr']
},
"location": data['absolute_location']
}
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'], _loc=rel_loc)
return jsonify(step)
except (exceptions.UnexpectedCharacters, exceptions.UnexpectedToken) as e:
return jsonify({
'flag': False
})
@app.route("/api/integral-split", methods=['POST'])
def integral_split():
data = json.loads(request.get_data().decode('utf-8'))
problem = integral.parser.parse_expr(data['problem'])
point = integral.parser.parse_expr(data['point'])
assert integral.parser.parse_expr(problem.var) not in point.findVar()
upper = problem.upper
lower = problem.lower
if integral.expr.sympy_style(upper) <= integral.expr.sympy_style(point) or integral.expr.sympy_style(lower) >= integral.expr.sympy_style(point):
return jsonify({
"flag": 'fail'
})
new_integral1 = integral.expr.Integral(problem.var, problem.lower, point, problem.body)
new_integral2 = integral.expr.Integral(problem.var, point, problem.upper, problem.body)
step = {
"flag": 'success',
"reason": "Split region",
"location": data['location'],
"params": {
"c": str(point)
},
"text": str(new_integral1 + new_integral2),
"latex": integral.latex.convert_expr(new_integral1 + new_integral2)
}
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'])
return jsonify(step)
@app.route("/api/integral-integrate-by-parts", methods=['POST'])
def integral_integrate_by_parts():
data = json.loads(request.get_data().decode('utf-8'))
try:
parts_u = integral.parser.parse_expr(data['parts_u'])
except:
return jsonify({
"flag": False,
"reason": "%s is not valid expression." % data['parts_u']
})
try:
parts_v = integral.parser.parse_expr(data['parts_v'])
except:
return jsonify({
"flag": False,
"reason": "%s is not valid expression." % data['parts_v']
})
rule = integral.rules.IntegrationByParts(parts_u, parts_v)
problem = integral.parser.parse_expr(data['problem'])
try:
new_problem = rule.eval(problem)
except NotImplementedError as e:
return jsonify({
"flag": False,
"reason": str(e)
})
log = {
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'reason': "Integrate by parts",
'params': {
'parts_u': data['parts_u'],
'parts_v': data['parts_v'],
},
'_latex_reason': "Integrate by parts, \\(u = %s, v = %s\\)" % (
integral.latex.convert_expr(parts_u), integral.latex.convert_expr(parts_v)
),
'location': data['location']
}
log['checked'], log['proof'] = proof.translate_single_item(log, data['problem'])
return jsonify({
"flag": True,
"log": log
})
@app.route("/api/integral-equation-substitution", methods=['POST'])
def integral_equation_substitution():
data = json.loads(request.get_data().decode('utf-8'))
old_expr = integral.parser.parse_expr(data['problem']).body
new_expr = integral.parser.parse_expr(data['new_expr'])
rule = integral.rules.Equation(old_expr, new_expr)
problem = integral.parser.parse_expr(data['problem'])
new_problem = rule.eval(problem)
if new_problem != problem and new_problem != old_expr:
return jsonify({
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'_latex_reason': "Equation substitution successful, \\( %s\\) == \\(%s\\)" % (
integral.latex.convert_expr(old_expr), integral.latex.convert_expr(new_expr)
),
'flag': "success"
})
else:
return jsonify({
'flag': "fail",
"_latex_reason": "\\(%s != %s\\)" %
(integral.latex.convert_expr(old_expr), integral.latex.convert_expr(new_expr))
})
@app.route("/api/integral-polynomial-division", methods=['POST'])
def integral_polynomial_division():
data = json.loads(request.get_data().decode('utf-8'))
rule = integral.rules.PolynomialDivision()
problem = integral.parser.parse_expr(data['problem'])
body = problem.body
try:
new_body = rule.eval(body)
except:
return jsonify({
'flag': False,
'reason': "Can't do divison now."
})
rhs = integral.expr.Integral(problem.var, problem.lower, problem.upper, new_body)
location = data['location']
if location:
location += ".0"
else:
location = "0"
step = {
'flag': True,
'text': str(rhs),
'latex': integral.latex.convert_expr(rhs),
'params': {
'rhs': str(new_body)
},
'reason': "Rewrite fraction",
"location": location
}
step['checked'], step['proof'] = proof.translate_single_item(step, data['problem'])
return jsonify(step)
@app.route("/api/integral-save-file", methods=['POST'])
def integral_save_file():
data = json.loads(request.get_data().decode('utf-8'))
file_name = "integral/examples/%s" % data['filename']
with open(file_name, 'w', encoding='utf-8') as f:
json.dump({"content": data['content']}, f, indent=4, ensure_ascii=False, sort_keys=True)
return jsonify({
'status': 'success'
})
@app.route("/api/integral-slagle", methods=['POST'])
def integral_slagle():
data = json.loads(request.get_data().decode('utf-8'))
problem = data['problem']
    t = 30
    # limit Slagle to run for at most 30 seconds
rule = slagle.Slagle(t)
try:
# node = limit_bfs(slagle.OrNode(problem))
# new_problem = node.compute_value()
# t = [i.info() for i in node.trace()]
# return json.dumps(t)
node = rule.compute_node(problem)
steps = slagle.perform_steps(node)
init = problem
for step in steps:
step['checked'], step['proof'] = proof.translate_single_item(step, init)
init = step['text']
return json.dumps(steps)
except:
new_problem = integral.parser.parse_expr(problem)
return json.dumps([{
'text': str(new_problem),
'latex': integral.latex.convert_expr(new_problem),
'reason': "Slagle algorithm can't work"
}])
|
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
# Let's see how we would find outliers in a dataset
# First we'll seed the numpy generator
np.random.seed(12345)
# Next we'll create the dataframe
dframe = DataFrame(np.random.randn(1000, 4))
# Show preview
dframe.head()
# Let's describe the data
dframe.describe()
# Let's select the first column
col = dframe[0]
# Now we can check which values in the column have an absolute value greater than 3, for instance.
col[np.abs(col) > 3]
# So we now know in column[0], rows 523 and 900 have values with abs > 3
# How about all the columns?
# We can use the "any" method
dframe[(np.abs(dframe) > 3).any(axis=1)]
# We could also cap the data at 3 (and -3)
dframe[np.abs(dframe) > 3] = np.sign(dframe) * 3
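# np.sign returns -1, 0 or 1 elementwise, so the line above clamps only the
# masked cells (abs value > 3) to +/-3 while preserving their original sign.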
dframe.describe()
|
__all__ = ['VAE', '_loss_function', 'train', 'test']
from torchvision.datasets import FashionMNIST
import numpy as np
import torch
import torch as th
import torch.optim as optim
import torch.nn.functional as F
from torch import nn
from torch.utils.tensorboard import SummaryWriter
class VAE(nn.Module):
"""A classic VAE.
Params
------
input_dim : int
The size of the (flattened) image vector
latent_dim : int
The size of the latent memory
"""
def __init__(self, input_dim=784, latent_dim=20):
super(VAE, self).__init__()
# Set dims
self.input_dim = int(input_dim)
self.latent_dim = int(latent_dim)
# Init the layers in the deep net
self.fc1 = nn.Linear(self.input_dim, 400)
self.fc21 = nn.Linear(400, self.latent_dim)
self.fc22 = nn.Linear(400, self.latent_dim)
self.fc3 = nn.Linear(self.latent_dim, 400)
self.fc4 = nn.Linear(400, self.input_dim)
def encode(self, x):
"""Encode a torch tensor (batch_size, input_size)"""
x = x.view(-1, self.input_dim)
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def _reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
"""Expand a latent memory, to input_size."""
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def sample(self, n, device=None):
"""Use noise to sample n images from latent space."""
with torch.no_grad():
x = torch.randn(n, self.latent_dim)
x = x.to(device)
samples = self.decode(x)
return samples
def forward(self, x):
"""Get a reconstructed image"""
mu, logvar = self.encode(x)
z = self._reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def _loss_function(recon_x, x, mu, logvar, input_dim):
"""Reconstruction + KL divergence losses summed over all elements and batch"""
BCE = F.binary_cross_entropy(recon_x,
x.view(-1, input_dim),
reduction="sum")
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
def train(train_batch, model, optimizer, device, input_dim):
"""A single VAE training step"""
model.train()
batch = train_batch.to(device)
optimizer.zero_grad()
    recon_batch, mu, logvar = model(batch)
    loss = _loss_function(recon_batch, batch, mu, logvar, input_dim)
loss.backward()
optimizer.step()
return loss
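

# A minimal, hedged usage sketch of VAE/train above (dataset path, batch size,
# learning rate and epoch count are illustrative assumptions, not part of this
# module):
#
#   from torch.utils.data import DataLoader
#   from torchvision import transforms
#
#   data = FashionMNIST("data/", train=True, download=True,
#                       transform=transforms.ToTensor())
#   loader = DataLoader(data, batch_size=128, shuffle=True)
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model = VAE().to(device)
#   optimizer = optim.Adam(model.parameters(), lr=1e-3)
#   for epoch in range(10):
#       for x, _ in loader:
#           loss = train(x, model, optimizer, device, model.input_dim)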
def test(test_data, model, device, input_dim):
"""Test a VAE on a whole dataset"""
model.eval()
test_loss = 0
with torch.no_grad():
for i, (data, _) in enumerate(test_data):
data = data.to(device)
recon_batch, mu, logvar = model(data)
test_loss += _loss_function(recon_batch, data, mu, logvar,
input_dim).item()
return test_loss |
import numpy as np
from opswrapper.output import NodeRecorder, ElementRecorder
def test_node_recorder():
recorder = NodeRecorder(
file='/path/to/file',
nodes=1,
dofs=[1, 2],
response='disp',
)
generated = recorder.tcl_code()
expected = 'recorder Node -file {/path/to/file} -node 1 -dof 1 2 disp'
assert generated == expected
def test_node_recorder_pass_array():
recorder = NodeRecorder(
file=R'C:\Scratch\displacement.dat',
nodes=np.array([1, 2, 3, 4, 5]),
dofs=np.array([1, 2, 3, 4, 5, 6]),
response='disp',
)
generated = recorder.tcl_code()
expected = (
'recorder Node -file {C:/Scratch/displacement.dat} '
'-node 1 2 3 4 5 -dof 1 2 3 4 5 6 disp'
)
assert generated == expected
def test_node_recorder_windows_path():
recorder = NodeRecorder(
file=R'C:\Scratch\displacement.dat',
nodes=[1, 2, 3, 4, 5],
dofs=[1, 2, 3, 4, 5, 6],
response='disp',
)
generated = recorder.tcl_code()
expected = (
'recorder Node -file {C:/Scratch/displacement.dat} '
'-node 1 2 3 4 5 -dof 1 2 3 4 5 6 disp'
)
assert generated == expected
def test_node_recorder_delayed_file():
recorder = NodeRecorder(nodes=1, dofs=1, response='accel')
generated = recorder.tcl_code()
expected = 'recorder Node -file {{{file!s}}} -node 1 -dof 1 accel'
assert generated == expected
def test_node_recorder_delayed_file_then_format():
recorder = NodeRecorder(nodes=1, dofs=1, response='accel')
generated = recorder.tcl_code().format(file='/path/to/file')
expected = 'recorder Node -file {/path/to/file} -node 1 -dof 1 accel'
assert generated == expected
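# Note on the two tests above (an observation, not new behavior): when no file
# is given, the generated Tcl keeps a str.format placeholder -- the doubled
# braces are literal braces that survive .format(), while {file!s} is filled in
# later, as the *_then_format test demonstrates.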
def test_node_recorder_all_nodes():
recorder = NodeRecorder(
file='/path/to/file',
nodes='all',
dofs=[1, 2, 3],
response='vel',
)
generated = recorder.tcl_code()
expected = 'recorder Node -file {/path/to/file} -node {*}[getNodeTags] -dof 1 2 3 vel'
assert generated == expected
def test_node_recorder_all_nodes_delayed_file():
recorder = NodeRecorder(nodes='all', dofs=1, response='accel')
generated = recorder.tcl_code()
expected = 'recorder Node -file {{{file!s}}} -node {{*}}[getNodeTags] -dof 1 accel'
assert generated == expected
def test_node_recorder_all_nodes_delayed_file_then_format():
recorder = NodeRecorder(nodes='all', dofs=1, response='accel')
generated = recorder.tcl_code().format(file='/path/to/file')
expected = 'recorder Node -file {/path/to/file} -node {*}[getNodeTags] -dof 1 accel'
assert generated == expected
def test_element_recorder():
recorder = ElementRecorder(
file='/path/to/file',
elements=1,
dofs=[1, 2],
response='localForce',
)
generated = recorder.tcl_code()
expected = 'recorder Element -file {/path/to/file} -ele 1 -dof 1 2 localForce'
assert generated == expected
def test_element_recorder_pass_array():
recorder = ElementRecorder(
file=R'C:\Scratch\forces.dat',
elements=np.array([1, 2, 3, 4, 5]),
dofs=np.array([1, 2, 3, 4, 5, 6]),
response='force',
)
generated = recorder.tcl_code()
expected = (
'recorder Element -file {C:/Scratch/forces.dat} '
'-ele 1 2 3 4 5 -dof 1 2 3 4 5 6 force'
)
assert generated == expected
def test_element_recorder_windows_path():
recorder = ElementRecorder(
file=R'C:\Scratch\forces.dat',
elements=[1, 2, 3, 4, 5],
dofs=[1, 2, 3, 4, 5, 6],
response='force',
)
generated = recorder.tcl_code()
expected = (
'recorder Element -file {C:/Scratch/forces.dat} '
'-ele 1 2 3 4 5 -dof 1 2 3 4 5 6 force'
)
assert generated == expected
def test_element_recorder_delayed_file():
recorder = ElementRecorder(elements=1, dofs=1, response='force')
generated = recorder.tcl_code()
expected = 'recorder Element -file {{{file!s}}} -ele 1 -dof 1 force'
assert generated == expected
def test_element_recorder_delayed_file_then_format():
recorder = ElementRecorder(elements=1, dofs=1, response='force')
generated = recorder.tcl_code().format(file='/path/to/file')
expected = 'recorder Element -file {/path/to/file} -ele 1 -dof 1 force'
assert generated == expected
def test_element_recorder_all_elements():
recorder = ElementRecorder(
file='/path/to/file',
elements='all',
dofs=[1, 2, 3],
response='globalForce',
)
generated = recorder.tcl_code()
expected = 'recorder Element -file {/path/to/file} -ele {*}[getEleTags] -dof 1 2 3 globalForce'
assert generated == expected
def test_element_recorder_all_elements_delayed_file():
recorder = ElementRecorder(elements='all', dofs=1, response='force')
generated = recorder.tcl_code()
expected = 'recorder Element -file {{{file!s}}} -ele {{*}}[getEleTags] -dof 1 force'
assert generated == expected
def test_element_recorder_all_elements_delayed_file_then_format():
recorder = ElementRecorder(elements='all', dofs=1, response='force')
generated = recorder.tcl_code().format(file='/path/to/file')
expected = 'recorder Element -file {/path/to/file} -ele {*}[getEleTags] -dof 1 force'
assert generated == expected
|
from sanic_openapi import doc
class UpdateRequestModel:
wallet = doc.String('Device wallet address', required=True)
class UpdateResponseModel:
update = doc.Boolean('Update status')
id = doc.String('ID of firmware file')
txhash = doc.String('Transaction hash of key')
class HashRequestModel:
hash = doc.String('MD5 Hash of downloaded file', required=True)
wallet = doc.String('Device wallet address', required=True)
|
import datetime
import calendar
import requests
import collections
import os
import json
import argparse
class Prom(object):
# url: base url for prometheus
#
def __init__(self, url, nseconds, end=None, host=None, start=None):
self.url = url
self.nseconds = nseconds
if start is None:
end = end or 0
self.end = calendar.timegm(
datetime.datetime.utcnow().utctimetuple()) - end
self.start = self.end - nseconds
else:
self.start = start
self.end = start + nseconds
self.headers = {}
if host is not None:
self.headers["Host"] = host
def fetch(self, query, groupby=None, xform=None):
resp = requests.get(self.url + "/api/v1/query_range", {
"query": query,
"start": self.start,
"end": self.end,
"step": "15"
}, headers=self.headers)
if not resp.ok:
raise Exception(str(resp))
data = resp.json()
return computeMinMaxAvg(data, groupby=groupby, xform=xform)
def fetch_cpu_by_container(self):
return self.fetch(
'rate(container_cpu_usage_seconds_total{container_name=~"mixer|policy|discovery|istio-proxy|captured|uncaptured"}[1m])',
metric_by_deployment_by_container,
to_miliCpus)
def fetch_memory_by_container(self):
return self.fetch(
'container_memory_usage_bytes{container_name=~"mixer|policy|discovery|istio-proxy|captured|uncaptured"}',
metric_by_deployment_by_container,
to_megaBytes)
def fetch_cpu_and_mem(self):
out = flatten(self.fetch_cpu_by_container(), "cpu_mili")
out.update(flatten(self.fetch_memory_by_container(), "mem_MB"))
return out
def fetch_by_response_code(self, query):
resp = requests.get(self.url + "/api/v1/query_range", {
"query": query,
"start": self.start,
"end": self.end,
"step": str(self.nseconds)
}, headers=self.headers)
if not resp.ok:
raise Exception(str(resp))
return resp.json()
def fetch_num_requests_by_response_code(self, code):
data = self.fetch_by_response_code(
'sum(rate(istio_requests_total{reporter="destination", response_code="' + str(code) + '"}[' + str(self.nseconds) + 's]))')
if len(data["data"]["result"]) > 0:
return data["data"]["result"][0]["values"]
return []
def fetch_500s_and_400s(self):
res = {}
data_404 = self.fetch_num_requests_by_response_code(404)
data_503 = self.fetch_num_requests_by_response_code(503)
data_504 = self.fetch_num_requests_by_response_code(504)
if len(data_404) > 0:
res["istio_requests_total_404"] = data_404[len(data_404)-1][1]
if len(data_503) > 0:
res["istio_requests_total_503"] = data_503[len(data_503)-1][1]
if len(data_504) > 0:
res["istio_requests_total_504"] = data_504[len(data_504)-1][1]
return res
def flatten(data, metric):
res = {}
for group, summary in data.items():
# remove - and istio- from group
grp = group.replace("istio-", "")
grp = grp.replace("-", "_")
grp = grp.replace("/", "_")
res[metric + "_min_" + grp] = summary[0]
res[metric + "_avg_" + grp] = summary[1]
res[metric + "_max_" + grp] = summary[2]
return res
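# For example (illustrative), a group "istio-pilot/discovery" with metric
# "cpu_mili" produces the keys cpu_mili_min_pilot_discovery,
# cpu_mili_avg_pilot_discovery and cpu_mili_max_pilot_discovery.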
# convert float bytes to int megabytes
def to_megaBytes(m):
return int(m / (1024 * 1024))
# convert float cpus to int mili cpus
def to_miliCpus(c):
return int(c * 1000.0)
DEPL_MAP = {
"fortioserver": "fortioserver_deployment",
"fortioclient": "fortio_deployment"
}
# returns deployment_name/container_name
def metric_by_deployment_by_container(metric):
depl = metric_by_deployment(metric)
if depl is None:
return None
mapped_name = depl
if depl in DEPL_MAP:
mapped_name = DEPL_MAP[depl]
return mapped_name + "/" + metric['container_name']
# These deployments have columns in the table, so only these are watched.
Watched_Deployments = set(["istio-pilot", "istio-telemetry",
"istio-policy", "fortioserver", "fortioclient"])
# returns deployment_name
def metric_by_deployment(metric):
depl = metric['pod_name'].rsplit('-', 2)[0]
if depl not in Watched_Deployments:
return None
return depl
def computeMinMaxAvg(d, groupby=None, xform=None):
if d['status'] != "success":
raise Exception("command not successful: " + d['status'] + str(d))
if d['data']['resultType'] != "matrix":
raise Exception("resultType not matrix: " + d['data']['resultType'])
"""
for res in d['data']['result']:
values = [float(v[1]) for v in res['values']]
res['values'] = ( min(values), sum(values)/len(values), max(values), len(values))
"""
ret = collections.defaultdict(list)
for result in d['data']['result']:
group = result['metric']['name']
if groupby is not None:
group = groupby(result['metric'])
if group is None:
continue
ret[group].append(result)
summary = {}
for group, lst in ret.items():
values = [float(v[1]) for v in lst[0]['values']]
for l in lst[1:]:
v = l['values']
for idx in range(len(values)):
values[idx] += float(v[idx][1])
s = (min(values), sum(values) / len(values), max(values), len(values))
if xform is not None:
s = (xform(s[0]), xform(s[1]), xform(s[2]), s[3])
summary[group] = s
return summary
def main(argv):
args = getParser().parse_args(argv)
p = Prom(args.url, args.nseconds, end=args.end, host=args.host)
out = p.fetch_cpu_and_mem()
resp_out = p.fetch_500s_and_400s()
out.update(resp_out)
indent = None
if args.indent is not None:
indent = int(args.indent)
    print(json.dumps(out, indent=indent))
def getParser():
parser = argparse.ArgumentParser(
"Fetch cpu and memory stats from prometheus")
parser.add_argument("url", help="prometheus base url")
parser.add_argument(
"nseconds", help="duration in seconds of the extract", type=int)
parser.add_argument(
"--end", help="relative time in seconds from now to end collection", type=int, default=0)
parser.add_argument(
"--host", help="host header when collection is thru ingress", default=None)
parser.add_argument(
"--indent", help="pretty print json with indent", default=None)
return parser
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))
|
def ssn_parser(ssn):
front, back = ssn.split('-')
sex = back[0]
if sex == '1' or sex == '2':
year = '19' + front[:2]
else:
year = '20' + front[:2]
    if (int(sex) % 2) == 0:
        sex = '여성'  # female
    else:
        sex = '남성'  # male
month = front[2:4]
day = front[4:6]
return year, month, day, sex
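

# A small usage sketch with a clearly fabricated registration number
# (format YYMMDD-SXXXXXX; the first digit after the dash encodes century and sex):
if __name__ == "__main__":
    year, month, day, sex = ssn_parser("990101-1234567")
    print(year, month, day, sex)  # -> 1999 01 01 남성 (male)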
|
import logging
import google.cloud.logging
from google.cloud.logging.handlers import CloudLoggingHandler
def setup_logger():
# https://googleapis.dev/python/logging/latest/stdlib-usage.html
client = google.cloud.logging.Client()
handler = CloudLoggingHandler(client)
logger = logging.getLogger('cloudLogger')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
return logger |
from django.urls import path
from . import api
actor_api = [
path('actor/<actor_id>', api.get_actor, name='get_actor'),
path('group/<group_id>', api.get_group, name='get_group'),
]
|
import anyjson
from twisted.internet import defer, reactor
from txes import connection, exceptions
# TODO: Custom/better json serialiazation (e.g. handle date)
class ElasticSearch(object):
"""
    A pyes compatible-ish elasticsearch twisted client.
Inspired by (code stolen from) pyes and paisley
"""
def __init__(self, servers=None, timeout=None, bulkSize=400,
discover=True, retryTime=10, discoveryInterval=300,
defaultIndexes=None, autorefresh=False):
        if isinstance(servers, basestring):
            servers = [servers]
if not defaultIndexes:
defaultIndexes = ["_all"]
elif isinstance(defaultIndexes, basestring):
defaultIndexes = [defaultIndexes]
self.defaultIndexes = defaultIndexes
self.timeout = timeout
self.bulkSize = bulkSize
self.retryTime = retryTime
self.discoveryInterval = discoveryInterval
self.autorefresh = autorefresh
self.refreshed = True
self.info = {}
self.bulkData = []
self.connection = connection.connect(servers=servers,
timeout=timeout,
retryTime=retryTime)
if discover:
self._performDiscovery()
else:
def cb(data):
                self.clusterName = data["cluster_name"]
d = self.clusterNodes()
d.addCallback(cb)
def _makePath(self, components):
return '/' + '/'.join([str(c) for c in components if c])
def _performDiscovery(self):
def cb(data):
self.cluster_name = data["cluster_name"]
for node in data["nodes"]:
httpAddr = data["nodes"][node].get("http_address")
if not httpAddr:
continue
server = httpAddr.strip("inet[/]")
self.connection.addServer(server)
reactor.callLater(self.discoveryInterval,
self._performDiscovery)
d = self.clusterNodes()
d.addCallback(cb)
def _sendQuery(self, queryType, query, indexes=None, docTypes=None,
**params):
def sendIt(_):
indices = self._validateIndexes(indexes)
dt = docTypes
if dt is None:
dt = []
elif isinstance(dt, basestring):
dt = [dt]
path = self._makePath([','.join(indices), ','.join(dt),
queryType])
d = self._sendRequest("GET", path, body=query, params=params)
return d
if self.autorefresh and not self.refreshed:
d = self.refresh(indexes)
d.addCallback(sendIt)
return d
else:
return sendIt(None)
def _sendRequest(self, method, path, body=None, params=None):
d = defer.maybeDeferred(self.connection.execute,
method, str(path), body, params)
return d
def _validateIndexes(self, indexes=None):
indices = indexes or self.defaultIndexes
if isinstance(indices, basestring):
return [indices]
return indices
def status(self, indexes=None):
"""
Retrieve the status of one or more indices
"""
indices = self._validateIndexes(indexes)
path = self._makePath([','.join(indices), "_status"])
d = self._sendRequest("GET", path)
return d
def createIndex(self, index, settings=None):
"""
        Creates an index with an optional settings dict.
"""
d = self._sendRequest("PUT", index, settings)
return d
def createIndexIfMissing(self, index, settings=None):
def eb(failure):
failure.trap(exceptions.IndexAlreadyExistsException)
return {u'acknowledged': True, u'ok': True}
d = self.createIndex(index, settings)
return d.addErrback(eb)
def deleteIndex(self, index):
"""
        Deletes an index.
"""
d = self._sendRequest("DELETE", index)
return d
def deleteIndexIfExists(self, index):
def eb(failure):
failure.trap(exceptions.IndexMissingException,
exceptions.NotFoundException)
return {u'acknowledged': True, u'ok': True}
d = self.deleteIndex(index)
return d.addErrback(eb)
def getIndices(self, includeAliases=False):
"""
        Get a dict holding an entry for each index which exists.
        If includeAliases is True, the dict will also contain entries for
        aliases.
        The key for each entry in the dict is the index or alias name. The
        value is a dict holding the following properties:
        - num_docs: Number of documents in the index or alias.
        - alias_for: Only present for an alias: holds a list of indices
which this is an alias for.
"""
def factor(status):
result = {}
indices = status["indices"]
for index in sorted(indices):
info = indices[index]
numDocs = info["docs"]["num_docs"]
result[index] = {"num_docs": numDocs}
if not includeAliases:
continue
for alias in info["aliases"]:
if alias not in result:
result[alias] = dict()
aliasDocs = result[alias].get("num_docs", 0) + numDocs
result[alias]["num_docs"] = aliasDocs
if "alias_for" not in result[alias]:
result[alias]["alias_for"] = list()
result[alias]["alias_for"].append(index)
return result
d = self.status()
return d.addCallback(factor)
def getAlias(self, alias):
"""
Return a list of indices pointed to by a given alias.
        Raises IndexMissingException if the alias does not exist.
"""
def factor(status):
return status["indices"].keys()
d = self.status(alias)
return d.addCallback(factor)
def changeAliases(self, *commands):
"""
Change the aliases stored.
A command is a tuple of (["add"|"remove"], index, alias)
You may specify multiple commands as additional arguments
"""
actions = [{c: {"index": i, "alias": a}} for c, i, a in commands]
d = self._sendRequest("POST", "_aliases", {"actions": actions})
return d
def addAlias(self, alias, indices):
"""
Add an alias to point to a set of indices.
"""
if isinstance(indices, basestring):
indices = [indices]
return self.changeAliases(*[("add", i, alias) for i in indices])
def deleteAlias(self, alias, indices):
"""
Delete an alias
"""
if isinstance(indices, basestring):
indices = [indices]
return self.changeAliases(*[("remove", i, alias) for i in indices])
def setAlias(self, alias, indices):
"""
        Set an alias (possibly removing what it already points to)
"""
def eb(failure):
failure.trap(exceptions.IndexMissingException)
return self.addAlias(alias, indices)
def factor(old_indices):
commands = [["remove", i, alias] for i in old_indices]
commands.extend([["add", i, alias] for i in indices])
if len(commands):
return self.changeAliases(*commands)
if isinstance(indices, basestring):
indices = [indices]
d = self.getAlias(alias)
d.addCallbacks(factor, eb)
return d
def closeIndex(self, index):
"""
Close an index.
"""
d = self._sendRequest("POST", "/%s/_close" % index)
return d
def openIndex(self, index):
"""
Open an index.
"""
d = self._sendRequest("POST", "/%s/_open" % index)
return d
def flush(self, indexes=None, refresh=None):
def flushIt(_):
            indices = self._validateIndexes(indexes)
            path = self._makePath([','.join(indices), "_flush"])
            params = None
            if refresh:
                params = {"refresh": True}
d = self._sendRequest("POST", path, params=params)
return d
if self.bulkData:
d = self.forceBulk()
d.addCallback(flushIt)
return d
else:
            return flushIt(None)
def refresh(self, indexes=None, timesleep=1):
def wait(results):
d = self.clusterHealth(waitForStatus="green")
d.addCallback(lambda _: results)
self.refreshed = True
return d
def delay(results):
d = defer.Deferred()
reactor.callLater(timesleep, d.callback, results)
d.addCallback(wait)
return d
def refreshIt(_):
            indices = self._validateIndexes(indexes)
path = self._makePath([','.join(indices), "_refresh"])
d = self._sendRequest("POST", path)
d.addCallback(delay)
return d
if self.bulkData:
d = self.forceBulk()
d.addCallback(refreshIt)
return d
else:
            return refreshIt(None)
def optimize(self, indexes=None, waitForMerge=False,
maxNumSegments=None, onlyExpungeDeletes=False,
refresh=True, flush=True):
"""
Optimize one or more indices.
"""
def done(results):
self.refreshed = True
return results
indices = self._validateIndexes(indexes)
path = self._makePath([','.join(indices), "_optimize"])
params = {"wait_for_merge": waitForMerge,
"only_expunge_deletes": onlyExpungeDeletes,
"refesh": refresh,
"flush": flush}
if maxNumSegments:
params["max_num_segments"] = maxNumSegments
d = self._sendRequest("POST", path, params=params)
d.addCallback(done)
return d
def analyze(self, text, index, analyzer=None):
"""
        Performs the analysis process on a text and returns the tokens
breakdown of the text
"""
if analyzer:
analyzer = {"analyzer": analyzer}
body = {"text": text}
path = self._makePath([index, "_analyze"])
d = self._sendRequest("POST", path, body=body, params=analyzer)
return d
def gatewaySnapshot(self, indexes=None):
"""
        Gateway snapshot of one or more indices
"""
indices = self._validateIndexes(indexes)
path = self._makePath([','.join(indices), "_gateway", "snapshot"])
d = self._sendRequest("POST", path)
return d
def putMapping(self, docType, mapping, indexes=None):
"""
Register specific mapping definition for a specific type against
one or more indices.
"""
indices = self._validateIndexes(indexes)
path = self._makePath([','.join(indices), docType, "_mapping"])
if docType not in mapping:
mapping = {docType: mapping}
self.refreshed = False
d = self._sendRequest("PUT", path, body=mapping)
return d
def getMapping(self, docType=None, indexes=None):
"""
Get the mapping definition
"""
indices = self._validateIndexes(indexes)
parts = [','.join(indices)]
if docType:
parts.append(docType)
parts.append("_mapping")
d = self._sendRequest("GET", self._makePath(parts))
return d
def collectInfo(self):
"""
        Collect info about the connection and fill the info dictionary
"""
def factor(result):
self.info = {}
self.info['server'] = {}
self.info['server']['name'] = result['name']
self.info['server']['version'] = result['version']
self.info['allinfo'] = result
self.info['status'] = self.status(["_all"])
return self.info
d = self._sendRequest("GET", '/')
d.addCallback(factor)
return d
def clusterHealth(self, level="cluster", waitForStatus=None,
waitForRelocatingShards=None, waitForNodes=None,
timeout=30):
"""
Check the current cluster health
"""
path = self._makePath(["_cluster", "health"])
if level not in ("cluster", "indices", "shards"):
raise ValueError("Invalid level: %s" % level)
mapping = {"level": level}
if waitForStatus:
if waitForStatus not in ("green", "yellow", "red"):
raise ValueError("Invalid waitForStatus: %s" % waitForStatus)
mapping["wait_for_status"] = waitForStatus
if waitForRelocatingShards:
mapping["wait_for_relocating_shards"] = waitForRelocatingShards
if waitForNodes:
mapping["wait_for_nodes"] = waitForNodes
if waitForStatus or waitForRelocatingShards or waitForNodes:
mapping["timeout"] = timeout
d = self._sendRequest("GET", path, mapping)
return d
def clusterState(self, filterNodes=None, filterRoutingTable=None,
filterMetadata=None, filterBlocks=None,
filterIndices=None):
"""
Retrieve the cluster state
"""
path = self._makePath(["_cluster", "state"])
params = {}
if filterNodes:
params['filter_nodes'] = filterNodes
if filterRoutingTable:
params['filter_routing_table'] = filterRoutingTable
if filterMetadata:
params['filter_metadata'] = filterMetadata
if filterBlocks:
params['filter_blocks'] = filterBlocks
if filterIndices:
if isinstance(filterIndices, basestring):
params['filter_indices'] = filterIndices
else:
params['filter_indices'] = ','.join(filterIndices)
d = self._sendRequest("GET", path, params=params)
return d
def clusterNodes(self, nodes=None):
parts = ["_cluster", "nodes"]
if nodes:
parts.append(','.join(nodes))
path = self._makePath(parts)
d = self._sendRequest("GET", path)
return d
def clusterStats(self, nodes=None):
"""
The cluster nodes info API
"""
parts = ["_cluster", "nodes"]
if nodes:
parts.append(','.join(nodes))
parts.append("stats")
path = self._makePath(parts)
d = self._sendRequest("GET", path)
return d
def index(self, doc, index, docType, id=None, parent=None,
forceInsert=None, bulk=False, version=None,
querystringArgs=None):
"""
Index a dict into a specific index and make it searchable
"""
self.refreshed = False
if bulk:
optype = "index"
if forceInsert:
optype = "create"
cmd = {optype: {"_index": index, "_type": docType}}
if parent:
cmd[optype]["_parent"] = parent
if version:
cmd[optype]["_version"] = version
if id:
cmd[optype]["_id"] = id
data = '\n'.join([anyjson.serialize(cmd),
anyjson.serialize(doc)])
self.bulkData.append(data)
return self.flushBulk()
if not querystringArgs:
querystringArgs = {}
if forceInsert:
querystringArgs["opType"] = "create"
if parent:
querystringArgs["parent"] = parent
if version:
querystringArgs["version"] = version
if id:
requestMethod = "PUT"
else:
requestMethod = "POST"
path = self._makePath([index, docType, id])
d = self._sendRequest(requestMethod, path, body=doc,
params=querystringArgs)
return d
def flushBulk(self, forced=False):
"""
Wait to process all pending operations
"""
if not forced and len(self.bulkData) < self.bulkSize:
return defer.succeed(None)
return self.forceBulk()
def forceBulk(self):
"""
Force executing of all bulk data
"""
if not len(self.bulkData):
return defer.succeed(None)
data = '\n'.join(self.bulkData) + '\n'
d = self._sendRequest("POST", "/_bulk", body=data)
self.bulkData = []
return d
def delete(self, index, docType, id, bulk=False):
"""
Delete a typed JSON document from a specific index based on its id.
"""
if bulk:
cmd = {"delete": {"_index": index,
"_type": docType,
"_id": id}}
self.bulkData.append(anyjson.serialize(cmd))
return self.flushBulk()
path = self._makePath([index, docType, id])
d = self._sendRequest("DELETE", path)
return d
def deleteByQuery(self, indexes, docTypes, query, **params):
"""
Delete documents from one or more indexes and one or more types
based on query.
"""
indices = self._validateIndexes(indexes)
if not docTypes:
docTypes = []
elif isinstance(docTypes, basestring):
docTypes = [docTypes]
path = self._makePath([','.join(indices), ','.join(docTypes),
"_query"])
d = self._sendRequest("DELETE", path, params=params)
return d
def deleteMapping(self, index, docType):
"""
Delete a document type from a specific index.
"""
path = self._makePath([index, docType])
d = self._sendRequest("DELETE", path)
return d
def get(self, index, docType, id, fields=None, routing=None, **params):
"""
Get a typed document form an index based on its id.
"""
path = self._makePath([index, docType, id])
if fields:
params["fields"] = ','.join(fields)
if routing:
params["routings"] = routing
d = self._sendRequest("GET", path, params=params)
return d
def search(self, query, indexes=None, docType=None, **params):
"""
        Execute a search against one or more indices
"""
indices = self._validateIndexes(indexes)
d = self._sendQuery("_search", query, indices, docType, **params)
return d
def scan(self, query, indexes=None, docType=None, scrollTimeout="10m",
**params):
"""
Return an iterator which will scan against one or more indices.
        Each call to next() will yield a deferred that will contain the
next dataset
"""
this = self
class Scroller(object):
def __init__(self, results):
self.results = results
def __iter__(self):
return self
def _setResults(self, results):
if not len(results["hits"]["hits"]):
raise StopIteration
self.results = results
return results
def next(self):
scrollId = self.results["_scroll_id"]
d = this._sendRequest("GET", "_search/scroll", scrollId,
{"scroll": scrollTimeout})
d.addCallback(self._setResults)
return d
def scroll(results):
return Scroller(results)
d = self.search(query=query, indexes=indexes, docType=docType,
search_type="scan", scroll=scrollTimeout, **params)
d.addCallback(scroll)
return d
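
    # A hedged usage sketch of scan() (the names `es` and `handle_page` are
    # illustrative and not part of this module):
    #
    #   d = es.scan({"query": {"match_all": {}}}, indexes=["myindex"])
    #   def consume(scroller):
    #       page_d = scroller.next()      # each next() returns a Deferred
    #       page_d.addCallback(handle_page)
    #   d.addCallback(consume)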
def reindex(self, query, indexes=None, docTypes=None, **params):
"""
Execute a search query against one or more indices and reindex the
hits.
"""
indices = self._validateIndexes(indexes)
if not docTypes:
docTypes = []
elif isinstance(docTypes, basestring):
docTypes = [docTypes]
path = self._makePath([','.join(indices), ','.join(docTypes),
"_reindexbyquery"])
d = self._sendRequest("POST", path, body=query, params=params)
return d
def count(self, query, indexes=None, docTypes=None, **params):
"""
Execute a query against one or more indices and get the hit count
"""
indices = self._validateIndexes(indexes)
d = self._sendQuery("_count", query, indices, docTypes, **params)
return d
def createRiver(self, river, riverName=None):
"""
Create a river
"""
if not riverName:
riverName = river["index"]["index"]
d = self._sendRequest("PUT", "/_river/%s/_meta" % riverName,
body=river)
return d
def deleteRiver(self, river, riverName=None):
"""
Delete a river
"""
if not riverName:
riverName = river["index"]["index"]
d = self._sendRequest("DELETE", "/_river/%s/" % riverName)
return d
def moreLikeThis(self, index, docType, id, fields, **params):
"""
Execute a "more like this" search query against on eor more fields.
"""
path = self._makePath([index, docType, id, "_mlt"])
params["fields"] = ','.join(fields)
d = self._sendRequest("GET", path, params=params)
return d
def updateSettings(self, index, settings):
"""
Update settings of an index.
"""
path = self._makePath([index, "_settings"])
d = self._sendRequest("PUT", path, body=settings)
return d
@property
def servers(self):
return self.connection.servers
|
from .apps import api
from . import forms, models
from .apps import error_response
from django.http import JsonResponse
import base64, logging, traceback, json
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
log = logging.getLogger(__name__)
@csrf_exempt
def log_in(request):
""" Login with username and password """
form = forms.LoginForm(request.POST)
if form.is_valid():
try:
user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
print('username:'+form.cleaned_data['username'],'password:'+form.cleaned_data['password'])
if user is not None:
# the password verified for the user
print('correct password',user.is_active)
if user.is_active:
data = {"Name": user.get_username(), "UserID": user.id, "Modules": []}
modules = models.get_user_modules(user)
for m in modules:
dict_ = m.module.to_dict()
dict_['Permission'] = m.permission
data['Modules'].append(dict_)
print(data)
return JsonResponse({'data': data})
except:
log.error(traceback.format_exc())
    print('responding')
return error_response(2)
@csrf_exempt
def update_module(request):
""" When choose one module, provide module profile and AuthToken
to update student list from ivle to face server
"""
form = forms.DataForm(request.POST)
if form.is_valid():
try:
data = json.loads(form.cleaned_data['data'])
print('owner:',form.cleaned_data['owner'])
# return person profile order by person name
exist_list = api.get_persons_by_group(group=data.get('face_group_id'))
new_list = models.Student.objects.filter(module_id=data.get('ID'))
tutors_list = [ump.user for ump in models.User_Module_Permission.objects.filter(module__id=data.get('ID'), permission='M')]
tutored_students = [s.student.to_dict() for s in models.get_my_student_in_module(tutor=form.cleaned_data['owner'], module_id=data.get('ID'))]
new_tutors_list = []
for i in range(len(tutors_list)):
                myss = [ts.student.to_dict() for ts in models.get_my_student_in_module(tutors_list[i], data.get('ID'))]
new_tutors_list.append({
'username': tutors_list[i].username,
'myss_len': len(myss)
})
# Relationship with data between face and ivle:
# face: name -> IVLE: UserID - A0123456
# face: first_name -> IVLE: Name - James Smith
# face: note -> IVLE: UserGuid - xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
            if data and new_list:
                # if face has no persons yet, initialize with an empty list
                exist_list = exist_list if exist_list else []
# Sort new_list by UserID
new_list = [(str(s.to_dict()['id']), s.to_dict()) for s in new_list]
new_list.sort()
new_list = [dict_ for (key, dict_) in new_list]
# iterate to match with each other
new_p, old_p = 0, 0
new, old = [False]*len(new_list), [False]*len(exist_list)
while old_p < len(exist_list) and new_p < len(new_list):
if str(new_list[new_p].get('id')) == exist_list[old_p].get('name'):
new[new_p] = True
old[old_p] = True
new_p += 1
elif str(new_list[new_p].get('id')) < exist_list[old_p].get('name'):
new_p += 1
old_p -= 1
old_p += 1
# Add new item to face database
updated_list, add_list = [], []
#print([p.get('id') for p in new_list], [p.get('name') for p in exist_list])
for i in range(len(new)):
if new[i] is False:
add_list.append({'name': new_list[i].get('id'),
'email': new_list[i].get('email'),
'first_name': new_list[i].get('name'),
'last_name': new_list[i].get('last_name'),
'note': new_list[i].get('note')})
if add_list:
new_persons_id = api.create_json_person(data=add_list, group=data.get('face_group_id'))
if new_persons_id:
if None in new_persons_id:
data['error_add'] = []
for i in range(len(new_persons_id)):
if new_persons_id[i] is None:
data['error_add'].append(dict(add_list[i]))
else:
updated_list.append(dict(add_list[i]))
updated_list[-1]['id'] = int(new_persons_id[i])
else:
data['error_add'] = add_list
for i in range(len(old)):
if old[i] is True:
updated_list.append(dict(exist_list[i]))
# Delete relation of item to group, not delete items
for i in [j for j in range(len(old)) if old[j] is True][::-1]:
del exist_list[i]
if exist_list:
delete_result = api.delete_person(person=[i.get('id') for i in exist_list],
group=data.get('face_group_id'))
if delete_result:
if False in [list(d.values())[0] for d in delete_result]:
data['error_delete'] = []
for i in range(len(delete_result)):
data['error_delete'].append(list(delete_result[i].keys())[0])
else:
data['error_delete'] = exist_list
del exist_list, new_list, add_list, tutors_list
data['student'] = updated_list
data['attendance'] = models.get_records(data.get('ID'))
else:
data['student'] = []
data['attendance'] = []
data['tutors'] = new_tutors_list
data['tutorial'] = tutored_students
return JsonResponse({'data': data})
except:
log.error(traceback.format_exc())
return error_response(1, name='update_module')
|
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template import RequestContext
from .models import Sticker, StickerVote, find_key
from .forms import StickerForm
@login_required
def sticker_submit(request):
if request.method == "POST":
form = StickerForm(request.POST, request.FILES)
if form.is_valid():
sticker = form.save()
sticker.speaker = request.user
sticker.save()
return redirect("sticker_review")
else:
form = StickerForm()
return render_to_response("stickers/submit.html", {
"form": form,
}, context_instance=RequestContext(request))
@login_required
def sticker_detail(request, pk):
sticker = get_object_or_404(Sticker, pk=pk)
return render_to_response("stickers/detail.html", {
"sticker": sticker,
}, context_instance=RequestContext(request))
@login_required
def sticker_vote(request, pk):
sticker = get_object_or_404(Sticker, pk=pk)
stickervote, created = StickerVote.objects.get_or_create(sticker=sticker, user=request.user)
return redirect("sticker_review")
@login_required
def sticker_review(request):
stickers = Sticker.objects.all()
return render_to_response("stickers/review.html", {
"stickers": stickers,
}, context_instance=RequestContext(request))
def lets_encrypt(request, token):
key = find_key(token)
return HttpResponse(key)
def lets_encrypt2(request):
return HttpResponse("1YdmQxzzmvPfBBp-pJlDMQMufuSFC6fJ11NwC8yPPRU.b6m5qKJTFMPZgBdDHWv1cU_zUprSrr15yWJ_CKofr0o")
|
import json
from enum import IntEnum
from test import test_utils
class ECRScanFailureException(Exception):
"""
Base class for other exceptions
"""
pass
class CVESeverity(IntEnum):
UNDEFINED = 0
INFORMATIONAL = 1
LOW = 2
MEDIUM = 3
HIGH = 4
CRITICAL = 5
class ScanVulnerabilityList:
"""
    ScanVulnerabilityList is a class that reads an OS vulnerability allow-list, in the format stored on the DLC repo,
to allow easy comparison of any ECR Scan Vulnerabilities on an image with its corresponding allow-list file.
"""
def __init__(self, minimum_severity=CVESeverity["MEDIUM"]):
self.vulnerability_list = {}
self.minimum_severity = minimum_severity
def construct_allowlist_from_file(self, file_path):
"""
Read JSON file and prepare the object with all allowed vulnerabilities
:param file_path: Path to the allow-list JSON file.
:return: dict self.vulnerability_list
"""
with open(file_path, "r") as f:
file_allowlist = json.load(f)
for package_name, package_vulnerability_list in file_allowlist.items():
for vulnerability in package_vulnerability_list:
if CVESeverity[vulnerability["severity"]] >= self.minimum_severity:
if package_name not in self.vulnerability_list:
self.vulnerability_list[package_name] = []
self.vulnerability_list[package_name].append(vulnerability)
return self.vulnerability_list
def construct_allowlist_from_ecr_scan_result(self, vulnerability_list):
"""
Read a vulnerability list and construct the vulnerability_list
:param vulnerability_list: list ECR Scan Result results
:return: dict self.vulnerability_list
"""
for vulnerability in vulnerability_list:
package_name = get_ecr_vulnerability_package_name(vulnerability)
if package_name not in self.vulnerability_list:
self.vulnerability_list[package_name] = []
if CVESeverity[vulnerability["severity"]] >= self.minimum_severity:
self.vulnerability_list[package_name].append(vulnerability)
return self.vulnerability_list
def __contains__(self, vulnerability):
"""
Check if an input vulnerability exists on the allow-list
:param vulnerability: dict JSON object consisting of information about the vulnerability in the format
presented by the ECR Scan Tool
:return: bool True if the vulnerability is allowed on the allow-list.
"""
package_name = get_ecr_vulnerability_package_name(vulnerability)
if package_name not in self.vulnerability_list:
return False
for allowed_vulnerability in self.vulnerability_list[package_name]:
if are_vulnerabilities_equivalent(vulnerability, allowed_vulnerability):
return True
return False
def __cmp__(self, other):
"""
Compare two ScanVulnerabilityList objects for equivalence
:param other: Another ScanVulnerabilityList object
:return: True if equivalent, False otherwise
"""
if not other or not other.vulnerability_list:
return not self.vulnerability_list
if sorted(self.vulnerability_list.keys()) != sorted(other.vulnerability_list.keys()):
return False
for package_name, package_vulnerabilities in self.vulnerability_list.items():
if len(self.vulnerability_list[package_name]) != len(other.vulnerability_list[package_name]):
return False
for v1, v2 in zip(
sorted(self.vulnerability_list[package_name]), sorted(other.vulnerability_list[package_name])
):
if not are_vulnerabilities_equivalent(v1, v2):
return False
return True
def __sub__(self, other):
"""
Difference between ScanVulnerabilityList objects
:param other: Another ScanVulnerabilityList object
:return: List of vulnerabilities that exist in self, but not in other
"""
if not self.vulnerability_list:
return None
if not other or not other.vulnerability_list:
return self
missing_vulnerabilities = [
vulnerability
for package_vulnerabilities in self.vulnerability_list.values()
for vulnerability in package_vulnerabilities
if vulnerability not in other
]
if not missing_vulnerabilities:
return None
difference = ScanVulnerabilityList(minimum_severity=self.minimum_severity)
difference.construct_allowlist_from_ecr_scan_result(missing_vulnerabilities)
return difference
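

# A hedged usage sketch of the class above (the file path and `scan_results`
# are illustrative placeholders):
#
#   allowed = ScanVulnerabilityList(minimum_severity=CVESeverity["HIGH"])
#   allowed.construct_allowlist_from_file("image.os_scan_allowlist.json")
#   observed = ScanVulnerabilityList(minimum_severity=CVESeverity["HIGH"])
#   observed.construct_allowlist_from_ecr_scan_result(scan_results)
#   new_vulnerabilities = observed - allowed   # None when nothing new is found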
def are_vulnerabilities_equivalent(vulnerability_1, vulnerability_2):
"""
Check if two vulnerability JSON objects are equivalent
:param vulnerability_1: dict JSON object consisting of information about the vulnerability in the format
presented by the ECR Scan Tool
:param vulnerability_2: dict JSON object consisting of information about the vulnerability in the format
presented by the ECR Scan Tool
:return: bool True if the two input objects are equivalent, False otherwise
"""
if (vulnerability_1["name"], vulnerability_1["severity"]) == (vulnerability_2["name"], vulnerability_2["severity"]):
# Do not compare package_version, because this may have been obtained at the time the CVE was first observed
# on the ECR Scan, which would result in unrelated version updates causing a mismatch while the CVE still
# applies on both vulnerabilities.
if all(
attribute in vulnerability_2["attributes"]
for attribute in vulnerability_1["attributes"]
if not attribute["key"] == "package_version"
):
return True
return False
def get_ecr_vulnerability_package_name(vulnerability):
"""
Get Package Name from a vulnerability JSON object
:param vulnerability: dict JSON object consisting of information about the vulnerability in the format
presented by the ECR Scan Tool
:return: str package name
"""
for attribute in vulnerability["attributes"]:
if attribute["key"] == "package_name":
return attribute["value"]
return None
def get_ecr_vulnerability_package_version(vulnerability):
"""
Get Package Version from a vulnerability JSON object
:param vulnerability: dict JSON object consisting of information about the vulnerability in the format
presented by the ECR Scan Tool
:return: str package version
"""
for attribute in vulnerability["attributes"]:
if attribute["key"] == "package_version":
return attribute["value"]
return None
def get_ecr_scan_allowlist_path(image_uri):
dockerfile_location = test_utils.get_dockerfile_path_for_image(image_uri)
image_scan_allowlist_path = dockerfile_location + ".os_scan_allowlist.json"
# Each example image (tied to CUDA version/OS version/other variants) can have its own list of vulnerabilities,
# which means that we cannot have just a single allowlist for all example images for any framework version.
if "example" in image_uri:
image_scan_allowlist_path = dockerfile_location + ".example.os_scan_allowlist.json"
return image_scan_allowlist_path
|
from flask import current_app
from loguru import logger
from baldrick.github.github_api import RepoHandler, PullRequestHandler
from baldrick.blueprints.github import github_webhook_handler
from baldrick.utils import insert_special_message
__all__ = ['pull_request_handler']
PULL_REQUEST_CHECKS = dict()
def pull_request_handler(actions=None):
"""
A decorator to add functions to the pull request checker.
By default, functions decorated with this decorator will be passed events
which match the following actions:
* unlabeled
* labeled
* synchronize
* opened
* milestoned
* demilestoned
However, you may pass in a list of strings with subsets of these actions to
control when the checks are run.
They will be passed ``(pr_handler, repo_handler)`` and are expected to
return a dictionary where the key is a unique string that refers to the
specific check that has been made, and the values are dictionaries with
the following keys:
* ``status`` is a string giving the state for the latest commit (one of
``success``, ``failure``, ``error``, or ``pending``).
* ``message``: the message to be shown in the status
* ``target_url`` (optional): a URL to link to in the status
"""
if callable(actions):
# Decorator is being used without brackets and the actions argument
# is just the function itself.
PULL_REQUEST_CHECKS[actions] = None
return actions
else:
def wrapper(func):
PULL_REQUEST_CHECKS[func] = actions
return func
return wrapper
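

# A hedged illustration of the contract described in the docstring above (the
# check name and message are made up; this is not a check shipped by baldrick):
#
#   @pull_request_handler(actions=['opened', 'synchronize'])
#   def example_check(pr_handler, repo_handler):
#       return {'example-check': {'status': 'success',
#                                 'message': 'Nothing to complain about'}}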
@github_webhook_handler
def handle_pull_requests(repo_handler, payload, headers):
"""
Handle pull request events which match the following event types:
"""
event = headers['X-GitHub-Event']
if event not in ('pull_request', 'issues'):
return "Not a pull_request or issues event"
# We only need to listen to certain kinds of events:
if event == 'pull_request':
if payload['action'] not in ('unlabeled', 'labeled', 'synchronize', 'opened'):
return "Action '" + payload['action'] + "' does not require action"
elif event == 'issues':
if payload['action'] not in ('milestoned', 'demilestoned'):
return "Action '" + payload['action'] + "' does not require action"
if event == 'pull_request':
number = payload['pull_request']['number']
elif event == 'issues':
number = payload['issue']['number']
else:
return "Not an issue or pull request"
is_new = (event == 'pull_request') & (payload['action'] == 'opened')
logger.debug(f"Processing event {event} #{number} on {repo_handler.repo}")
return process_pull_request(
repo_handler.repo, number, repo_handler.installation,
action=payload['action'], is_new=is_new)
def process_pull_request(repository, number, installation, action,
is_new=False):
# TODO: cache handlers and invalidate the internal cache of the handlers on
# certain events.
pr_handler = PullRequestHandler(repository, number, installation)
pr_config = pr_handler.get_config_value("pull_requests", {})
if not pr_config.get("enabled", False):
msg = "Skipping PR checks, disabled in config."
logger.debug(msg)
return msg
# Don't comment on closed PR
if pr_handler.is_closed:
return "Pull request already closed, no need to check"
repo_handler = RepoHandler(pr_handler.head_repo_name,
pr_handler.head_branch, installation)
# First check whether there are labels that indicate the checks should be
# skipped
skip_labels = pr_config.get("skip_labels", [])
skip_fails = pr_config.get("skip_fails", True)
for label in pr_handler.labels:
if label in skip_labels:
if skip_fails:
pr_handler.set_check(
current_app.bot_username,
"Skipping checks due to {0} label".format(label),
status='completed', conclusion='failure')
return
results = {}
for function, actions in PULL_REQUEST_CHECKS.items():
if actions is None or action in actions:
result = function(pr_handler, repo_handler)
# Ignore skipped checks
if result is not None:
results.update(result)
# Special message for a special day
not_boring = pr_handler.get_config_value('not_boring', cfg_default=True)
if not_boring: # pragma: no cover
special_msg = ''
if is_new: # Always be snarky for new PR
special_msg = insert_special_message('')
else:
import random
tensided_dice_roll = random.randrange(10)
if tensided_dice_roll == 9: # 1 out of 10 for subsequent remarks
special_msg = insert_special_message('')
if special_msg:
pr_handler.submit_comment(special_msg)
# Post each failure as a status
existing_checks = pr_handler.list_checks()
for context, details in sorted(results.items()):
full_context = current_app.bot_username + ':' + context
# TODO: Revisit if the note made for statuses still applies to checks.
# NOTE: we could in principle check if the status has been posted
# before, and if so not post it again, but we had this in the past
# and there were some strange caching issues where GitHub would
# return old status messages, so we avoid doing that.
pr_handler.set_check(
full_context, details['description'],
details_url=details.get('target_url'), status='completed',
conclusion=details['state'])
# For statuses that have been skipped this time but existed before, set
# status to pass and set message to say skipped
for full_context in existing_checks:
if full_context.startswith(current_app.bot_username + ':'):
context = full_context[len(current_app.bot_username) + 1:]
if context not in results:
pr_handler.set_check(
current_app.bot_username + ':' + context,
'This check has been skipped', status='completed',
conclusion='neutral')
# Also set the general 'single' status check as a skipped check if it
# is present
if full_context == current_app.bot_username:
pr_handler.set_check(
current_app.bot_username, 'This check has been skipped',
status='completed', conclusion='neutral')
return 'Finished pull requests checks'
|
from core.util import text_util
class ReadingEngine:
def exec(self, text: str) -> str:
# text_util
result: str = text_util.preprocessing(text)
result = text_util.reading(result)
return result
|
from django.test import TransactionTestCase
from django.db.models import Q
from django.test.client import RequestFactory
from django.utils.text import slugify
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from mc2.organizations.models import Organization, OrganizationUserRelation
class OrganizationTestCase(TransactionTestCase):
def mk_user(self, username='foobar', email='foobar@gmail.com',
password='password', **kwargs):
User = get_user_model()
return User.objects.create_user(username, email, password, **kwargs)
def mk_organization(self, name='Foo', users=[], **kwargs):
fields = {
'name': name,
'slug': slugify(unicode(name))
}
fields.update(kwargs)
org = Organization.objects.create(**fields)
for user in users:
OrganizationUserRelation.objects.create(
user=user,
organization=org,
is_admin=True)
return org
def mk_request(self, method, *args, **kwargs):
request = RequestFactory()
request = getattr(request, method)(*args, **kwargs)
request.session = {}
return request
def get_perms(self, perm):
if isinstance(perm, basestring):
perms = (perm,)
else:
perms = perm
perms = [p.split('.', 1) for p in perms]
filter_clauses = [
Q(content_type__app_label=p[0], codename=p[1])
for p in perms]
perms_qs = Permission.objects.filter(
reduce(lambda x, y: x | y, filter_clauses))
if len(perms_qs) != len(perms):
raise Permission.DoesNotExist
return perms_qs
def grant_perms(self, obj, perm):
perms_field = ('permissions'
if isinstance(obj, Group)
else 'user_permissions')
perms = list(self.get_perms(perm))
getattr(obj, perms_field).add(*perms)
def revoke_perms(self, obj, perm):
perms_field = ('permissions'
if isinstance(obj, Group)
else 'user_permissions')
perms = list(self.get_perms(perm))
getattr(obj, perms_field).remove(*perms)
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ModelStatistics callback defination."""
import logging
import zeus
from .callback import Callback
from zeus.metrics import calc_model_flops_params, calc_forward_latency
from zeus.common import ClassFactory, ClassType
if zeus.is_torch_backend():
import torch
@ClassFactory.register(ClassType.CALLBACK)
class ModelStatistics(Callback):
"""Callback that log statistics about model after each epoch."""
def __init__(self):
"""Initialize ModelStatistics callback."""
        super(ModelStatistics, self).__init__()
self.priority = 220
def before_train(self, logs=None):
"""Be called before the training process."""
self.input = None
self.flops = None
self.params = None
self.latency = None
self.calc_params_each_epoch = self.trainer.config.calc_params_each_epoch
self.calc_latency = self.trainer.config.calc_latency
if zeus.is_tf_backend():
import tensorflow as tf
datasets = self.trainer.valid_input_fn()
data_iter = tf.compat.v1.data.make_one_shot_iterator(datasets)
# data_iter = self.trainer.valid_input_fn().make_one_shot_iterator()
input_data, _ = data_iter.get_next()
self.input = input_data[:1]
def after_train_step(self, batch_index, logs=None):
"""Be called after each batch of Training."""
try:
if self.input is None:
input, target = logs['train_batch']
self.input = torch.unsqueeze(input[0], 0)
except Exception as ex:
logging.warning("model statics failed, ex=%s", ex)
def after_epoch(self, epoch, logs=None):
"""Be called after each epoch."""
if self.calc_params_each_epoch:
self.update_flops_params(epoch=epoch, logs=logs)
def after_train(self, logs=None):
"""Be called after train."""
if not self.calc_params_each_epoch:
self.update_flops_params(logs=logs)
if self.calc_latency:
self.update_latency(logs=logs)
def update_flops_params(self, epoch=None, logs=None):
"""Calculate flops and params."""
self.model = self.trainer.model
try:
if self.flops is None:
flops_count, params_count = calc_model_flops_params(self.model,
self.input)
self.flops, self.params = flops_count * 1e-9, params_count * 1e-3
summary_perfs = logs.get('summary_perfs', {})
if epoch:
summary_perfs.update({'flops': self.flops, 'params': self.params, 'epoch': epoch})
else:
summary_perfs.update({'flops': self.flops, 'params': self.params})
logs.update({'summary_perfs': summary_perfs})
logging.info("flops: {} , params:{}".format(self.flops, self.params))
except Exception as ex:
logging.warning("model statics failed, ex=%s", ex)
def update_latency(self, epoch=None, logs=None):
"""Calculate latency."""
self.model = self.trainer.model
try:
summary_perfs = logs.get('summary_perfs', {})
if self.latency is None:
sess_config = self.trainer._init_session_config() if zeus.is_tf_backend() else None
self.latency = calc_forward_latency(self.model, self.input, sess_config) * 1000
if epoch:
summary_perfs.update({'latency': self.latency, 'epoch': epoch})
else:
summary_perfs.update({'latency': self.latency})
logs.update({'summary_perfs': summary_perfs})
logging.info("flops: {} , params:{}, latency:{}".format(self.flops, self.params, self.latency))
except Exception as ex:
logging.warning("model statics failed, ex=%s", ex)
|