def _lexcmp(a, b):
    i = 0
    while True:
        if len(a) == len(b):
            if i == len(a):
                return 0
        else:
            if i == len(a):
                return -1
            if i == len(b):
                return 1
        if a[i].isalpha():
            if not b[i].isalpha():
                return -1
        if b[i].isalpha():
            if not a[i].isalpha():
                return 1
        val = cmp(a[i], b[i])
        if val != 0:
            return val
        i += 1
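A quick sanity check of the ordering _lexcmp implements; a minimal Python 2 sketch (the input strings are illustrative, and cmp is the Python 2 builtin):

# Letters sort before non-letters, and a string that runs out first sorts first.
assert _lexcmp('foo', 'foo') == 0    # identical strings are equal
assert _lexcmp('ab', 'abc') == -1    # shorter prefix sorts first
assert _lexcmp('a', '~') == -1       # alphabetic beats non-alphabetic
assert _lexcmp('alpha', 'beta') < 0  # otherwise falls back to character cmp()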
def getnum(self):
    str = self.str
    i = 0
    for c in str:
        if c not in '0123456789':
            break
        i += 1
val = cmp(l1, l2)
val = _lexcmp(l1, l2)
def _compare(s1, s2):
    if s1 == s2:
        return 0
    p1 = VersionParser(s1)
    p2 = VersionParser(s2)
    while True:
        l1 = p1.getlex()
        l2 = p2.getlex()
        val = cmp(l1, l2)
        if val != 0:
            return val
        n1 = p1.getnum()
        n2 = p2.getnum()
        val = cmp(n1, n2)
        if val != 0:
            return val
        if p1.str == p2.str:
            return 0
val = cmp(l1, l2)
val = _lexcmp(l1, l2)
def _compare_flat(s1, s2):
    if s1 == s2:
        return 0
    while True:
        # parse lexical components
        i = 0
        for c in s1:
            if c in '0123456789':
                break
            i += 1
        if i:
            l1 = s1[:i]
            s1 = s1[i:]
        else:
            l1 = ''
        i = 0
        for c in s2:
            if c in '0123456789':
                break
            i += 1
        if i:
            l2 = s2[:i]
            s2 = s2[i:]
        else:
            l2 = ''
        val = cmp(l1, l2)
        if val != 0:
            return val
        # if lexical component is equal parse numeric component
        i = 0
        for c in s1:
            if c not in '0123456789':
                break
            i += 1
        if i:
            n1 = int(s1[:i])
            s1 = s1[i:]
        else:
            n1 = 0
        i = 0
        for c in s2:
            if c not in '0123456789':
                break
            i += 1
        if i:
            n2 = int(s2[:i])
            s2 = s2[i:]
        else:
            n2 = 0
        val = cmp(n1, n2)
        if val != 0:
            return val
        if s1 == s2:
            return 0
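Where the split into lexical and numeric components pays off; a minimal Python 2 sketch with illustrative version fragments:

# Numeric components compare as integers, so 2 < 10 even though '2' > '1' as text.
assert _compare_flat("2010.10.2", "2010.10.10") == -1
assert cmp("2010.10.2", "2010.10.10") == 1  # naive string comparison disagrees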
files = StockBase.Paths.files + \
        [ 'source-versions', 'SYNC_HEAD', 'checkout' ]
files = [ 'source-versions', 'SYNC_HEAD', 'checkout' ]
def __init__(self, path, recursed_paths=[]):
    StockBase.__init__(self, path)
    if self.link in recursed_paths:
        raise CircularDependency("circular dependency detected `%s' is in recursed paths %s" %
                                 (self.link, recursed_paths))
path = join(path, ".git")
command = "git --git-dir %s init" % commands.mkarg(path)
init_path = join(init_path, ".git")
command = "git --git-dir %s init" % commands.mkarg(init_path)
def init_create(cls, path, bare=False, verbose=False):
    if not lexists(path):
        os.mkdir(path)
return dir, branch
return realpath(dir), branch
def _parse_stock(stock):
    try:
        dir, branch = stock.split("#", 1)
    except ValueError:
        dir = stock
        branch = None
if not self.stocks.has_key(stock_name) or \
   self.stocks[stock_name].link != realpath(dir):
matches = [ stock for stock in self.stocks.values()
            if stock.link == dir and (not branch or stock.branch == branch) ]
if not matches:
def unregister(self, stock):
    dir, branch = self._parse_stock(stock)
    stock_name = basename(dir)
    if branch:
        stock_name += "#" + branch
    if not self.stocks.has_key(stock_name) or \
       self.stocks[stock_name].link != realpath(dir):
        raise Error("no matches for unregister")
shutil.rmtree(self.stocks[stock_name].path)
del self.stocks[stock_name]
if len(matches) > 1:
    raise Error("multiple implicit matches for unregister")
stock = matches[0]
shutil.rmtree(stock.path)
del self.stocks[stock.name]
def getpath_cached(p, package):
    """get path of cached package, whether in the pool or in a subpool"""
    path = p.pkgcache.getpath(package)
def pkgcache_list_versions(pool, name):
    versions = [ pkgcache_version
                 for pkgcache_name, pkgcache_version in pool.pkgcache.list()
                 if pkgcache_name == name ]
    for subpool in pool.subpools:
        versions += pkgcache_list_versions(subpool, name)
    return versions

def pkgcache_getpath_newest(pool, name):
    versions = pkgcache_list_versions(pool, name)
    if not versions:
        return None
    versions.sort(debversion.compare)
    version_newest = versions[-1]
    package = pool.fmt_package_id(name, version_newest)
    return pool.getpath_deb(package, build=False)

def binary2source(pool, package):
    """translate package from binary to source"""
    name, version = pool.parse_package_id(package)
    if version:
        path = pool.getpath_deb(package, build=False)
        if not path:
            return None
        source_name = extract_source_name(path)
        if not source_name:
            return package
        return pool.fmt_package_id(source_name, version)
    path = pkgcache_getpath_newest(pool, name)
    if not path:
        return None
    source_name = extract_source_name(path)
    if not source_name:
        return name
    return source_name

def getpath_build_log(package):
    try:
        pool = Pool()
    except Pool.Error, e:
        fatal(e)
    path = pool.getpath_build_log(package)
def getpath_cached(p, package):
    """get path of cached package, whether in the pool or in a subpool"""
    path = p.pkgcache.getpath(package)
    if path:
        return path
    for subpool in p.subpools:
        path = getpath_cached(subpool, package)
        if path:
            return path
    return None
for subpool in p.subpools:
    path = getpath_cached(subpool, package)
    if path:
        return path
source_package = binary2source(pool, package)
if source_package:
    path = pool.getpath_build_log(source_package)
return None
if not path:
    package_desc = `package`
    if source_package:
        package_desc += " (%s)" % source_package
    fatal("no build log for " + package_desc)
return path
try:
    p = pool.Pool()
except pool.Error, e:
    fatal(e)
path = getpath_build_log(package)
def main():
    args = sys.argv[1:]
    if not args:
        usage()
    package = args[0]

    try:
        p = pool.Pool()
    except pool.Error, e:
        fatal(e)

    source_package = package
    deb = getpath_cached(p, package)
    if deb:
        source_name = extract_source_name(deb)
        if source_name:
            source_package = source_name
            if '=' in package:
                source_package += "=" + package.split("=", 1)[1]

    path = p.getpath_build_log(source_package)
    if not path:
        fatal("no build log for `%s' (%s)" % (package, source_package))

    for line in file(path).readlines():
        print line,
source_package = package
deb = getpath_cached(p, package)
if deb:
    source_name = extract_source_name(deb)
    if source_name:
        source_package = source_name
        if '=' in package:
            source_package += "=" + package.split("=", 1)[1]

path = p.getpath_build_log(source_package)
if not path:
    fatal("no build log for `%s' (%s)" % (package, source_package))
if val != 0:
    return val
n1 = p1.getnum()
n2 = p2.getnum()
val = cmp(n1, n2)
def _compare(s1, s2):
    if s1 == s2:
        return 0
    p1 = VersionParser(s1)
    p2 = VersionParser(s2)
    while True:
        l1 = p1.getlex()
        l2 = p2.getlex()
        val = cmp(l1, l2)
        if val != 0:
            return val
        n1 = p1.getnum()
        n2 = p2.getnum()
        val = cmp(n1, n2)
        if val != 0:
            return val
epoch = 0
epoch = ''
def parse(v):
    if ':' in v:
        epoch, v = v.split(':', 1)
    else:
        epoch = 0
    if '-' in v:
        upstream_version, debian_revision = v.rsplit('-', 1)
    else:
        upstream_version = v
        debian_revision = ''
    return epoch, upstream_version, debian_revision
class VersionParser:
    def __init__(self, str):
        self.str = str

    def getlex(self):
        lex = re.match(r'(\D*)', self.str).group(1)
        self.str = self.str[len(lex):]
        return lex

    def getnum(self):
        num = re.match(r'(\d*)', self.str).group(1)
        self.str = self.str[len(num):]
        if num:
            return int(num)
        return 0

def _compare(s1, s2):
    if s1 == s2:
        return 0
    p1 = VersionParser(s1)
    p2 = VersionParser(s2)
    while True:
        l1 = p1.getlex()
        l2 = p2.getlex()
        val = cmp(l1, l2)
        if val != 0:
            return val
        n1 = p1.getnum()
        n2 = p2.getnum()
        val = cmp(n1, n2)
        if val != 0:
            return val
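How the parser alternates between the two token kinds; a minimal Python 2 sketch assuming the VersionParser above (the version string is illustrative):

p = VersionParser("1.10a2")
print p.getlex()   # '' (no leading non-digits)
print p.getnum()   # 1
print p.getlex()   # '.'
print p.getnum()   # 10
print p.getlex()   # 'a'
print p.getnum()   # 2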
epoch = ''
epoch = '0'
def parse(v):
    if ':' in v:
        epoch, v = v.split(':', 1)
    else:
        epoch = ''
    if '-' in v:
        upstream_version, debian_revision = v.rsplit('-', 1)
    else:
        upstream_version = v
        debian_revision = ''
    return epoch, upstream_version, debian_revision
debian_revision = ''
debian_revision = '0'
a = parse(normalize(a))
b = parse(normalize(b))
a = parse(a)
b = parse(b)
def compare(a, b):
    """Compare a with b according to Debian versioning criteria"""
    a = parse(normalize(a))
    b = parse(normalize(b))
    for i in (0, 1, 2):
        val = _compare(a[i], b[i])
        if val != 0:
            return val
    return 0
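End-to-end behaviour of compare(); a hedged Python 2 sketch with illustrative version strings, assuming the string-epoch variant of parse() and that normalize() leaves these strings unchanged:

# The epoch dominates: '1:0.9' outranks '2.0' despite the smaller upstream part.
assert compare("1:0.9", "2.0") > 0
# Within the same epoch, numeric components are compared as integers.
assert compare("1.2", "1.10") < 0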
val = _compare(a[i], b[i])
val = _compare_flat(a[i], b[i])
howmany = 1000
howmany = 10000
def test():
    import time
    howmany = 1000
    start = time.time()
    for i in xrange(howmany):
        compare("0-2010.10.1-d6cbb928", "0-2010.10.10-a9ee521c")
    end = time.time()
    elapsed = end - start
    print "%d runs in %.4f seconds (%.2f per/sec)" % (howmany, elapsed, howmany / elapsed)
packages = dict(self._list())
packages = dict(self._list(all_versions=False))
def resolve(self, unresolved):
    """Resolve a list of unresolved packages.

    If unresolved is a single unresolved package, return a single
    resolved package.

    If unresolved is a tuple or list of unresolved packages, return a
    list of resolved packages"""
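What the two calling conventions look like; a hedged sketch with hypothetical package names and versions (actual results depend on pool contents):

# pool.resolve("hello")            -> "hello=2.10-1"                    (single -> single)
# pool.resolve(["hello", "dpkg"])  -> ["hello=2.10-1", "dpkg=1.19.7"]   (list -> list)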
if val != 0:
    return val
l1 = p1.getlex()
l2 = p2.getlex()
val = cmp(l1, l2)
def _compare(s1, s2):
    if s1 == s2:
        return 0
    p1 = VersionParser(s1)
    p2 = VersionParser(s2)
    while True:
        n1 = p1.getnum()
        n2 = p2.getnum()
        val = cmp(n1, n2)
        if val != 0:
            return val
        l1 = p1.getlex()
        l2 = p2.getlex()
        val = cmp(l1, l2)
        if val != 0:
            return val
        if p1.str == p2.str:
            return 0
i = 0
for c in s1:
    if c in '0123456789':
        break
    i += 1
if i:
    l1 = s1[:i]
    s1 = s1[i:]
else:
    l1 = ''
i = 0
for c in s2:
    if c in '0123456789':
        break
    i += 1
if i:
    l2 = s2[:i]
    s2 = s2[i:]
else:
    l2 = ''
val = cmp(l1, l2)
if val != 0:
    return val
def _compare_flat(s1, s2):
    if s1 == s2:
        return 0
    while True:
        # parse numeric component for comparison
        i = 0
        for c in s1:
            if c not in '0123456789':
                break
            i += 1
        if i:
            n1 = int(s1[:i])
            s1 = s1[i:]
        else:
            n1 = 0
        i = 0
        for c in s2:
            if c not in '0123456789':
                break
            i += 1
        if i:
            n2 = int(s2[:i])
            s2 = s2[i:]
        else:
            n2 = 0
        val = cmp(n1, n2)
        if val != 0:
            return val
        # if numeric components equal, parse lexical components
        i = 0
        for c in s1:
            if c in '0123456789':
                break
            i += 1
        if i:
            l1 = s1[:i]
            s1 = s1[i:]
        else:
            l1 = ''
        i = 0
        for c in s2:
            if c in '0123456789':
                break
            i += 1
        if i:
            l2 = s2[:i]
            s2 = s2[i:]
        else:
            l2 = ''
        val = cmp(l1, l2)
        if val != 0:
            return val
        if s1 == s2:
            return 0
if val != 0:
    return val
i = 0
for c in s1:
    if c in '0123456789':
        break
    i += 1
if i:
    l1 = s1[:i]
    s1 = s1[i:]
else:
    l1 = ''
i = 0
for c in s2:
    if c in '0123456789':
        break
    i += 1
if i:
    l2 = s2[:i]
    s2 = s2[i:]
else:
    l2 = ''
val = cmp(l1, l2)
if not packages and not input:
if not args[1:] and not input:
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'i:sqt',
                                   ['input=', 'strict', 'quiet', 'tree'])
    except getopt.GetoptError, e:
        usage(e)

    if not args:
        usage()

    outputdir = args[0]
    packages = args[1:]

    input = None
    opt_strict = False
    opt_quiet = False
    opt_tree = False
    for opt, val in opts:
        if opt in ('-i', '--input'):
            if val == '-':
                input = sys.stdin
            else:
                input = file(val, "r")
        elif opt in ('-s', '--strict'):
            opt_strict = True
        elif opt in ('-q', '--quiet'):
            opt_quiet = True
        elif opt in ('-t', '--tree'):
            opt_tree = True

    p = pool.Pool()

    if input:
        packages += read_packages(input)

    resolved = []
    unresolved = []
    for package in packages:
        if not p.exists(package):
            if opt_strict:
                fatal("%s: no such package" % package)
            if not opt_quiet:
                warn("%s: no such package" % package)
            continue
        if '=' in package:
            resolved.append(package)
        else:
            unresolved.append(package)

    if unresolved:
        resolved += fmt_package_tuples(cmd_list.list_packages(False, unresolved))

    packages = resolved
    if not packages and not input:
        # if no packages specified, get all the newest versions
        packages = fmt_package_tuples(p.list())

    for package in packages:
        path_from = p.getpath_deb(package)
        fname = basename(path_from)
        if opt_tree:
            package_name = package.split("=")[0]
            path_to = join(outputdir, get_treedir(package_name), fname)
            mkdir(dirname(path_to))
        else:
            path_to = join(outputdir, basename(path_from))
        if not exists(path_to):
            hardlink_or_copy(path_from, path_to)

    sys.exit(exitcode)
packages.sort(reverse=True)
def _cmp(a, b):
    val = cmp(b[0], a[0])
    if val != 0:
        return val
    return debversion.compare(a[1], b[1])

packages.sort(cmp=_cmp, reverse=True)
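The combined effect of the reversed name comparison and reverse=True; a minimal Python 2 sketch on made-up (name, version) tuples, assuming debversion.compare() sorts versions ascending:

packages = [("bar", "1.2"), ("foo", "1.0"), ("bar", "1.10")]
packages.sort(cmp=_cmp, reverse=True)
# Names end up ascending, versions descending within a name:
# [("bar", "1.10"), ("bar", "1.2"), ("foo", "1.0")]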
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'an',
                                   ['all-versions', 'name-only'])
    except getopt.GetoptError, e:
        usage(e)

    opt_all_versions = False
    opt_name_only = False
    for opt, val in opts:
        if opt in ('-a', '--all-versions'):
            opt_all_versions = True
        elif opt in ('-n', '--name-only'):
            opt_name_only = True

    if opt_name_only and opt_all_versions:
        fatal("--name-only and --all-versions are conflicting options")

    globs = args
    packages = list_packages(opt_all_versions, globs)
    packages.sort(reverse=True)

    if opt_name_only:
        names = set()
        for name, version in packages:
            names.add(name)
        for name in names:
            print name
    else:
        for name, version in packages:
            print "%s=%s" % (name, version)
paths = self.Paths(path)
paths = cls.Paths(path)
def create(cls, path, link):
    mkdir(path)
    paths = self.Paths(path)
    os.symlink(realpath(link), paths.link)
if self.branch:
    relative_path = make_relative(self.paths.checkout, dir)
else:
    relative_path = make_relative(self.link, dir)
relative_path = make_relative(self.workdir, dir)
def _sync_update_source_versions(self, dir):
    """update versions for a particular source package at <dir>"""
    packages = deb_get_packages(dir)
    versions = verseek.list(dir)
self.head = Git(self.workdir).rev_parse("HEAD")
self.head = Git(self.paths.checkout).rev_parse("HEAD")
def sync(self):
    """sync stock by updating source versions and importing binaries into the cache"""
for subpool in subpools:
for subpool in self.subpools:
def print_info(self):
    if len(self.stocks):
        print "# stocks"
        for stock in self.stocks:
            addr = stock.link
            if stock.branch:
                addr += "#" + stock.branch
            print addr
checkout_path = stock.paths.checkout
if exists(join(checkout_path, "arena.internals")):
    command = "cd %s && sumo-close" % commands.mkarg(checkout_path)
    error = os.system(command)
    if error:
        raise Error("failed command: " + command)

shutil.rmtree(stock.paths.path)
def unregister(self, stock):
    dir, branch = self._parse_stock(stock)
    stock_name = basename(dir)
    if branch:
        stock_name += "#" + branch
    matches = [ stock for stock in self.stocks.values()
                if stock.link == dir and (not branch or stock.branch == branch) ]
    if not matches:
        raise Error("no matches for unregister")
if stock.name in self.subpools:
if isinstance(stock, StockPool):
    for name, version in self.pkgcache.list():
        packages.add((name, version))
else:
    newest = {}
    for name, version in self.pkgcache.list():
        if not newest.has_key(name) or newest[name] < version:
            newest[name] = version
    for name, version in newest.items():
        packages.add((name, version))

return list(packages)
    return list(packages)

newest = {}
for name, version in packages:
    if not newest.has_key(name) or newest[name] < version:
        newest[name] = version

return newest.items()
def list(self, all_versions=False):
    """List packages in pool -> list of (name, version) tuples.
commit = orig.rev_parse(self.branch)
if not commit:
    raise Error("no such branch `%s' at %s" % (self.branch, self.link))
checkout.update_ref("refs/heads/" + self.branch, commit)
def dup_branch(branch):
    commit = orig.rev_parse(branch)
    if not commit:
        raise Error("no such branch `%s' at %s" % (branch, self.link))
    checkout.update_ref("refs/heads/" + branch, commit)

dup_branch(self.branch)
def _get_workdir(self):
    """Return an initialized workdir path.
return self._getoutput("git-rev-parse", rev + "^0")
return self._getoutput("git-rev-parse", rev)
def rev_parse(self, rev):
    """git-rev-parse <rev>.
    Returns object-id of parsed rev.
    Returns None on failure.
    """
    try:
        return self._getoutput("git-rev-parse", rev + "^0")
    except self.Error:
        return None
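A usage sketch for the wrapper (the repository path is hypothetical); the "^0" suffix peels the rev to a commit, so an annotated tag resolves to the commit it points at rather than the tag object itself:

git = Git("/path/to/repo")             # hypothetical checkout
print git.rev_parse("HEAD")            # object id of the current commit
print git.rev_parse("no-such-branch")  # None instead of raising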
for possibleport in configuration['ports']:
    try:
        servicelogger.log("[INFO]: Trying to wait")
        shimstack.waitforconn(unique_id, possibleport,
                              nmconnectionmanager.connection_handler)
    except Exception, e:
        servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e))
        servicelogger.log_last_exception()
    else:
        acceptor_state['lock'].acquire()
        acceptor_state['started'] = True
        acceptor_state['lock'].release()

        myname = unique_id + ":" + str(possibleport)
        servicelogger.log("[INFO]: Now listening as " + myname)
        break
if not private_key_string:
    raise TypeError("private_key_string must be provided if api_key is not")
if not isinstance(private_key_string, basestring):
    raise TypeError("private_key_string must be a string")
if not isinstance(xmlrpc_url, basestring):
    raise TypeError("xmlrpc_url must be a string")
if not isinstance(allow_ssl_insecure, bool):
    raise TypeError("allow_ssl_insecure must be True or False")
if not isinstance(ca_certs_file, basestring):
    raise TypeError("ca_certs_file must be a string")

if allow_ssl_insecure:
    self.proxy = xmlrpclib.Server(xmlrpc_url)
else:
    ssl_transport = _get_ssl_transport(ca_certs_file)
    self.proxy = xmlrpclib.Server(xmlrpc_url, transport=ssl_transport)

if not api_key:
    api_key = self._get_api_key(username, private_key_string)

self.auth = {'username':username, 'api_key':api_key}

def _get_api_key(self, username, private_key_string):
    try:
        import repyhelper
        import repyportability
        repyhelper.translate_and_import("rsa.repy")
    except ImportError, e:
        raise SeattleGENIError("Unable to get API key from SeattleGENI " +
                               "because a required python or repy module " +
                               "cannot be found:" + str(e))

    private_key_dict = rsa_string_to_privatekey(private_key_string)
    encrypted_data = self.proxy.get_encrypted_api_key(username)
    decrypted_data = rsa_decrypt(encrypted_data, private_key_dict)
    split_data = decrypted_data.split("!")

    if len(split_data) != 2 or len(split_data[0]) != 20:
        raise AuthenticationError("The provided private key does not appear " +
                                  "to correspond to this account's public key: " +
                                  "encrypted API key could not be decrypted.")

    api_key = split_data[1]
    return api_key

def _do_call(self, function, *args):
    try:
        return function(self.auth, *args)
    except socket.error, err:
        raise CommunicationError("XMLRPC failed: " + str(err))
    except xmlrpclib.Fault, fault:
        if fault.faultCode == FAULTCODE_AUTHERROR:
            raise AuthenticationError
        elif fault.faultCode == FAULTCODE_INVALIDREQUEST:
            raise InvalidRequestError(fault.faultString)
        elif fault.faultCode == FAULTCODE_NOTENOUGHCREDITS:
            raise NotEnoughCreditsError(fault.faultString)
        elif fault.faultCode == FAULTCODE_UNABLETOACQUIRE:
            raise UnableToAcquireResourcesError(fault.faultString)
def start_accepter():
    shimstack = ShimStackInterface('(RSAShim)(NatDeciderShim)')
    unique_id = rsa_publickey_to_string(configuration['publickey'])
    unique_id = sha_hexhash(unique_id) + str(configuration['service_vessel'])

    # do this until we get the accepter started...
    while True:
        if is_accepter_started():
            # we're done, return the name!
            return myname
        else:
            for possibleport in configuration['ports']:
                try:
                    servicelogger.log("[INFO]: Trying to wait")
                    shimstack.waitforconn(unique_id, possibleport,
                                          nmconnectionmanager.connection_handler)
                except Exception, e:
                    servicelogger.log("[ERROR]: when calling waitforconn for the connection_handler: " + str(e))
                    servicelogger.log_last_exception()
                else:
                    # the waitforconn was completed so the acceptor is started
                    acceptor_state['lock'].acquire()
                    acceptor_state['started'] = True
                    acceptor_state['lock'].release()

                    # assign the nodemanager name
                    myname = unique_id + ":" + str(possibleport)
                    servicelogger.log("[INFO]: Now listening as " + myname)
                    break
            else:
                servicelogger.log("[ERROR]: cannot find a port for waitforconn.")

        # check infrequently
        time.sleep(configuration['pollfrequency'])
servicelogger.log("[ERROR]: cannot find a port for waitforconn.") time.sleep(configuration['pollfrequency']) def is_worker_thread_started(): for thread in threading.enumerate(): if 'WorkerThread' in str(thread): return True else: return False def start_worker_thread(sleeptime): if not is_worker_thread_started(): workerthread = nmconnectionmanager.WorkerThread(sleeptime) workerthread.setDaemon(True) workerthread.start() def is_advert_thread_started(): for thread in threading.enumerate(): if 'Advertisement Thread' in str(thread): return True else: return False def start_advert_thread(vesseldict, myname, nodekey): if should_start_waitable_thread('advert','Advertisement Thread'): advertthread = nmadvertise.advertthread(vesseldict, nodekey) nmadvertise.myname = myname advertthread.setDaemon(True) advertthread.start() started_waitable_thread('advert') def is_status_thread_started(): for thread in threading.enumerate(): if 'Status Monitoring Thread' in str(thread): return True else: return False def start_status_thread(vesseldict,sleeptime): if should_start_waitable_thread('status','Status Monitoring Thread'): statusthread = nmstatusmonitor.statusthread(vesseldict, sleeptime, nmAPI) statusthread.setDaemon(True) statusthread.start() started_waitable_thread('status') def main(): global configuration if not FOREGROUND: daemon.daemonize() gotlock = runonce.getprocesslock("seattlenodemanager") if gotlock == True: pass else: if gotlock: servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + ") is running") else: servicelogger.log("[ERROR]:Another node manager process is running") return servicelogger.log("[INFO]:Loading config") configuration = persist.restore_object("nodeman.cfg") initialize_ip_interface_restrictions(configuration) if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin': if 'crontab_updated_for_2009_installer' not in configuration or \ configuration['crontab_updated_for_2009_installer'] == False: try: import update_crontab_entry modified_crontab_entry = \ update_crontab_entry.modify_seattle_crontab_entry() if modified_crontab_entry: configuration['crontab_updated_for_2009_installer'] = True persist.commit_object(configuration,"nodeman.cfg") except Exception,e: exception_traceback_string = traceback.format_exc() servicelogger.log("[ERROR]: The following error occured when " \ + "modifying the crontab for the new 2009 " \ + "seattle crontab entry: " \ + exception_traceback_string) myip = None while True:
raise InternalError(fault.faultString)

def _do_pwauth_call(self, function, password, *args):
    """For use by calls that require a password rather than an api key."""
    pwauth = {'username':self.auth['username'], 'password':password}
    myip = emulcomm.getmyip()
except Exception, e:
    if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.":
        pass
    return function(pwauth, *args)
except socket.error, err:
    raise CommunicationError("XMLRPC failed: " + str(err))
except xmlrpclib.Fault, fault:
    if fault.faultCode == FAULTCODE_AUTHERROR:
        raise AuthenticationError
    elif fault.faultCode == FAULTCODE_INVALIDREQUEST:
        raise InvalidRequestError(fault.faultString)
    elif fault.faultCode == FAULTCODE_NOTENOUGHCREDITS:
        raise NotEnoughCreditsError(fault.faultString)
    elif fault.faultCode == FAULTCODE_UNABLETOACQUIRE:
        raise UnableToAcquireResourcesError(fault.faultString)
def main():
    global configuration

    if not FOREGROUND:
        # Background ourselves.
        daemon.daemonize()

    # ensure that only one instance is running at a time...
    gotlock = runonce.getprocesslock("seattlenodemanager")
    if gotlock == True:
        # I got the lock. All is well...
        pass
    else:
        if gotlock:
            servicelogger.log("[ERROR]:Another node manager process (pid: " +
                              str(gotlock) + ") is running")
        else:
            servicelogger.log("[ERROR]:Another node manager process is running")
        return

    # I'll grab the necessary information first...
    servicelogger.log("[INFO]:Loading config")
    # BUG: Do this better? Is this the right way to engineer this?
    configuration = persist.restore_object("nodeman.cfg")

    # Armon: initialize the network restrictions
    initialize_ip_interface_restrictions(configuration)

    # ZACK BOKA: For Linux and Darwin systems, check to make sure that the new
    #            seattle crontab entry has been installed in the crontab.
    #            Do this here because the "nodeman.cfg" needs to have been read
    #            into configuration via the persist module.
    if nonportable.ostype == 'Linux' or nonportable.ostype == 'Darwin':
        if 'crontab_updated_for_2009_installer' not in configuration or \
           configuration['crontab_updated_for_2009_installer'] == False:
            try:
                import update_crontab_entry
                modified_crontab_entry = \
                    update_crontab_entry.modify_seattle_crontab_entry()
                # If updating the seattle crontab entry succeeded, then update the
                # 'crontab_updated_for_2009_installer' so the nodemanager no longer
                # tries to update the crontab entry when it starts up.
                if modified_crontab_entry:
                    configuration['crontab_updated_for_2009_installer'] = True
                    persist.commit_object(configuration, "nodeman.cfg")
            except Exception, e:
                exception_traceback_string = traceback.format_exc()
                servicelogger.log("[ERROR]: The following error occured when "
                                  + "modifying the crontab for the new 2009 "
                                  + "seattle crontab entry: "
                                  + exception_traceback_string)

    # get the external IP address...
    # BUG: What if my external IP changes? (A problem throughout)
    myip = None
    while True:
        try:
            # Try to find our external IP.
            myip = emulcomm.getmyip()
        except Exception, e:
            # If we aren't connected to the internet, emulcomm.getmyip() raises this:
            if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.":
                # So we try again.
                pass
            else:
                # It wasn't emulcomm.getmyip()'s exception. re-raise.
                raise
        else:
            # We succeeded in getting our external IP. Leave the loop.
            break
        time.sleep(0.1)

    vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version)

    # Start accepter...
    myname = start_accepter()

    # send our advertised name to the log
    servicelogger.log('myname = ' + str(myname))

    # Start worker thread...
    start_worker_thread(configuration['pollfrequency'])

    # Start advert thread...
    start_advert_thread(vesseldict, myname, configuration['publickey'])

    # Start status thread...
    start_status_thread(vesseldict, configuration['pollfrequency'])

    # we should be all set up now.
    servicelogger.log("[INFO]:Started")

    # I will count my iterations through the loop so that I can log a message
    # periodically. This makes it clear I am alive.
    times_through_the_loop = 0

    # BUG: Need to exit all when we're being upgraded
    while True:
        # E.K: Previously there was a check to ensure that the acceptor thread
        # was started. There is no way to actually check this and that code was
        # never executed, so I removed it completely.

        if not is_worker_thread_started():
            servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...")
            start_worker_thread(configuration['pollfrequency'])

        if should_start_waitable_thread('advert', 'Advertisement Thread'):
            servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...")
            start_advert_thread(vesseldict, myname, configuration['publickey'])

        if should_start_waitable_thread('status', 'Status Monitoring Thread'):
            servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...")
            start_status_thread(vesseldict, configuration['pollfrequency'])

        if not runonce.stillhaveprocesslock("seattlenodemanager"):
            servicelogger.log("[ERROR]:The node manager lost the process lock...")
            harshexit.harshexit(55)

        time.sleep(configuration['pollfrequency'])

        # if I've been through the loop enough times, log this...
        times_through_the_loop = times_through_the_loop + 1
        if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0:
            servicelogger.log("[INFO]: node manager is alive...")
        raise
    else:
        break
    time.sleep(0.1)

vesseldict = nmrequesthandler.initialize(myip, configuration['publickey'], version)

myname = start_accepter()
servicelogger.log('myname = ' + str(myname))

start_worker_thread(configuration['pollfrequency'])
start_advert_thread(vesseldict, myname, configuration['publickey'])
start_status_thread(vesseldict, configuration['pollfrequency'])

servicelogger.log("[INFO]:Started")

times_through_the_loop = 0
while True:
    if not is_worker_thread_started():
        servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...")
        start_worker_thread(configuration['pollfrequency'])

    if should_start_waitable_thread('advert', 'Advertisement Thread'):
        servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...")
        start_advert_thread(vesseldict, myname, configuration['publickey'])

    if should_start_waitable_thread('status', 'Status Monitoring Thread'):
        servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...")
        start_status_thread(vesseldict, configuration['pollfrequency'])

    if not runonce.stillhaveprocesslock("seattlenodemanager"):
        servicelogger.log("[ERROR]:The node manager lost the process lock...")
        harshexit.harshexit(55)

    time.sleep(configuration['pollfrequency'])

    times_through_the_loop = times_through_the_loop + 1
    if times_through_the_loop % LOG_AFTER_THIS_MANY_ITERATIONS == 0:
        servicelogger.log("[INFO]: node manager is alive...")

if __name__ == '__main__':
    for arg in sys.argv[1:]:
        if arg == '-nat':
            AUTO_USE_NAT = True
        if arg == '--foreground':
            FOREGROUND = True

    servicelogger.init('nodemanager')
raise InternalError(fault.faultString)

def acquire_lan_resources(self, count):
    """
    <Purpose>
        Acquire LAN vessels.
    <Arguments>
        count
            The number of vessels to acquire.
    <Exceptions>
        The common exceptions described in the module comments, as well as:
        SeattleGENINotEnoughCredits
            If the account does not have enough available vessel credits
            to fulfill the request.
    <Side Effects>
        If successful, 'count' LAN vessels have been acquired for the account.
    <Returns>
        A list of vessel handles of the acquired vessels.
    """
    return self.acquire_resources('lan', count)

def acquire_wan_resources(self, count):
    """
    <Purpose>
        Acquire WAN vessels.
    <Arguments>
        count
            The number of vessels to acquire.
    <Exceptions>
        The common exceptions described in the module comments, as well as:
        SeattleGENINotEnoughCredits
            If the account does not have enough available vessel credits
            to fulfill the request.
    <Side Effects>
        If successful, 'count' WAN vessels have been acquired for the account.
    <Returns>
        A list of vessel handles of the acquired vessels.
    """
    return self.acquire_resources('wan', count)

def acquire_nat_resources(self, count):
    """
    <Purpose>
        Acquire NAT vessels.
    <Arguments>
        count
            The number of vessels to acquire.
    <Exceptions>
        The common exceptions described in the module comments, as well as:
        SeattleGENINotEnoughCredits
            If the account does not have enough available vessel credits
            to fulfill the request.
    <Side Effects>
        If successful, 'count' NAT vessels have been acquired for the account.
    <Returns>
        A list of vessel handles of the acquired vessels.
    """
    return self.acquire_resources('nat', count)

def acquire_random_resources(self, count):
    """
    <Purpose>
        Acquire vessels (they can be LAN, WAN, NAT, or any combination of
        these).
    <Arguments>
        count
            The number of vessels to acquire.
    <Exceptions>
        The common exceptions described in the module comments, as well as:
        SeattleGENINotEnoughCredits
            If the account does not have enough available vessel credits
            to fulfill the request.
    <Side Effects>
        If successful, 'count' vessels have been acquired for the account.
    <Returns>
        A list of vessel handles of the acquired vessels.
    """
    return self.acquire_resources('random', count)

def acquire_resources(self, res_type, count):
    """
    <Purpose>
        Acquire vessels.
    <Arguments>
        res_type
            A string describing the type of vessels to acquire.
        count
            The number of vessels to acquire.
    <Exceptions>
        The common exceptions described in the module comments, as well as:
        SeattleGENINotEnoughCredits
            If the account does not have enough available vessel credits
            to fulfill the request.
    <Side Effects>
        If successful, 'count' vessels have been acquired for the account.
    <Returns>
        A list of vessel handles of the acquired vessels.
    """
    if not isinstance(res_type, basestring):
        raise TypeError("res_type must be a string")
    if type(count) not in [int, long]:
        raise TypeError("count must be an integer")

    rspec = {'rspec_type':res_type, 'number_of_nodes':count}
    return self._do_call(self.proxy.acquire_resources, rspec)

def acquire_specific_vessels(self, handlelist):
    """
    <Purpose>
        Attempt to acquire specific vessels.
    <Arguments>
        handlelist
            A list of vessel handles.
    <Exceptions>
        The common exceptions described in the module comments, as well as:
        SeattleGENINotEnoughCredits
            If the account does not have enough available vessel credits
            to fulfill the request.
    <Side Effects>
        If successful, zero or more vessels from handlelist have been acquired.
    <Returns>
        A list of vessel handles of the acquired vessels.
    """
    _validate_handle_list(handlelist)
    return self._do_call(self.proxy.acquire_specific_vessels, handlelist)

def release_resources(self, handlelist):
    """
    <Purpose>
        Release vessels.
    <Arguments>
        handlelist
            A list of handles as returned by acquire_vessels() or found in
            the 'handle' key of the dictionaries returned by
            get_resource_info().
    <Exceptions>
        The common exceptions described in the module comments.
    <Side Effects>
        If successful, the vessels in handlelist have been released. If not
        successful, it is possible that a partial set of the vessels was
        released.
    <Returns>
        None
    """
    _validate_handle_list(handlelist)
    return self._do_call(self.proxy.release_resources, handlelist)

def renew_resources(self, handlelist):
    """
    <Purpose>
        Renew vessels.
    <Arguments>
        handlelist
            A list of handles as returned by acquire_vessels() or found in
            the 'handle' key of the dictionaries returned by
            get_resource_info().
    <Exceptions>
        The common exceptions described in the module comments, as well as:
        SeattleGENINotEnoughCredits
            If the account is currently over its vessel credit limit, then
            vessels cannot be renewed until the account is no longer over
            its credit limit.
    <Side Effects>
        If successful, the vessels in handlelist have been renewed. If not
        successful, it is possible that a partial set of the vessels was
        renewed.
    <Returns>
        None
    """
    _validate_handle_list(handlelist)
    return self._do_call(self.proxy.renew_resources, handlelist)

def get_resource_info(self):
    """
    <Purpose>
        Obtain information about acquired vessels.
    <Arguments>
        None
    <Exceptions>
        The common exceptions described in the module comments.
    <Side Effects>
        None
    <Returns>
        A list of dictionaries, where each dictionary describes a vessel
        that is currently acquired by the account.
    """
    return self._do_call(self.proxy.get_resource_info)

def get_account_info(self):
    """
    <Purpose>
        Obtain information about the account.
    <Arguments>
        None
    <Exceptions>
        The common exceptions described in the module comments.
    <Side Effects>
        None
    <Returns>
        A dictionary with information about the account.
    """
    return self._do_call(self.proxy.get_account_info)

def get_public_key(self):
    """
    <Purpose>
        Obtain the public key of the account.
    <Arguments>
        None
    <Exceptions>
        The common exceptions described in the module comments.
    <Side Effects>
        None
    <Returns>
        A string containing the public key of the account.
    """
    return self._do_call(self.proxy.get_public_key)

def set_public_key(self, password, pubkeystring):
    """
    <Purpose>
        Set the public key of the account.
    <Arguments>
        password
            The account password. This is required because changing the
            public key of the account cannot be done with just the api key.
        pubkeystring
            A string representing the new public key to be set for the
            account.
    <Exceptions>
        The common exceptions described in the module comments, as well as:
        InvalidRequestError
            If the pubkey is invalid.
    <Side Effects>
        The public key of the account is changed and will be updated on all
        vessels the account has acquired.
    <Returns>
        None
    """
    self._do_pwauth_call(self.proxy.set_public_key, password, pubkeystring)

def regenerate_api_key(self, password):
    """
    <Purpose>
        Generate a new API key for the account.
    <Arguments>
        password
            The account password. This is required because changing the api
            key of the account cannot be done with just the current api key.
    <Exceptions>
        The common exceptions described in the module comments.
    <Side Effects>
        The account's api key has been changed.
    <Returns>
        The new api key for the account.
    """
    api_key = self._do_pwauth_call(self.proxy.regenerate_api_key, password)
    self.auth['api_key'] = api_key
    return api_key

def _validate_handle_list(handlelist):
    """
    Raise a TypeError or ValueError if handlelist is not a non-empty list
    of strings.
    """
    if not isinstance(handlelist, list):
        raise TypeError("Invalid data type for handle list: " +
                        str(type(handlelist)))

    for handle in handlelist:
        if not isinstance(handle, basestring):
            raise TypeError("Invalid data type for a handle in the handle list: " +
                            str(type(handle)))

    if not handlelist:
        raise ValueError("Given handlelist is empty.")

def _get_ssl_transport(ca_certs_file):
    """
    Returns an object usable as the transport for an xmlrpclib proxy. This
    will be an M2Crypto.m2xmlrpclib.SSL_Transport that has been configured
    with a context that has the ca_certs_file loaded, will not allow SSLv2,
    and will reject certificate names that don't match the hostname.
    """
    main()
except Exception, e:
    servicelogger.log_last_exception()
    harshexit.harshexit(15)
    import M2Crypto
except ImportError, err:
    raise ImportError("In order to use the SeattleGENI XMLRPC client with " +
                      "allow_ssl_insecure=False, you need M2Crypto " +
                      "installed. " + str(err))

class M2CryptoSSLTransport(M2Crypto.m2xmlrpclib.SSL_Transport):
    def request(self, host, handler, request_body, verbose=0):
        if host.find(":") == -1:
            host = host + ":443"
        return M2Crypto.m2xmlrpclib.SSL_Transport.request(self, host, handler,
                                                          request_body, verbose)

ctx = M2Crypto.SSL.Context("sslv3")
ctx.set_verify(M2Crypto.SSL.verify_peer | M2Crypto.SSL.verify_fail_if_no_peer_cert,
               depth=9)
if ctx.load_verify_locations(ca_certs_file) != 1:
    raise SeattleGENIError("No CA certs found in file: " + ca_certs_file)

return M2CryptoSSLTransport(ctx)

class SeattleGENIError(Exception):
    """Base class for exceptions raised by the SeattleGENIClient."""

class CommunicationError(SeattleGENIError):
    """Indicates that XMLRPC communication failed."""

class InternalError(SeattleGENIError):
    """
    Indicates an unexpected error occurred, probably either a bug in this
    client or a bug in SeattleGENI.
    """

class AuthenticationError(SeattleGENIError):
    """Indicates an authentication failure (invalid username and/or API key)."""
    def __init__(self, msg=None):
        if msg is None:
            msg = "Authentication failed. Invalid username and/or API key."
        SeattleGENIError.__init__(self, msg)

class InvalidRequestError(SeattleGENIError):
    """Indicates that the request is invalid."""

class NotEnoughCreditsError(SeattleGENIError):
    """
    Indicates that the requested operation requires more vessel credits to
    be available than the account currently has.
    """

class UnableToAcquireResourcesError(SeattleGENIError):
    """
    Indicates that the requested operation failed because SeattleGENI was
    unable to acquire the requested resources.
    """

if __name__ == "__main__":
    main()
def drvterm(t, p, q, l, m):
    dv = t.betx**(p/2.) * t.bety**(q/2.)
    dv *= exp(+2j*pi*((p-2*l)*t.mux + (q-2*m)*t.muy))
def drvterm(t, p=0, q=0, l=0, m=0):
    dv = t.betx**(abs(p)/2.) * t.bety**(abs(q)/2.)
    dv *= _n.exp(+2j*pi*((p-2*l)*t.mux + (q-2*m)*t.muy))
def drvterm(t, p, q, l, m):
    dv = t.betx**(p/2.) * t.bety**(q/2.)
    dv *= exp(+2j*pi*((p-2*l)*t.mux + (q-2*m)*t.muy))
    return dv
s=self.xaxis
s=self.ont.s
def _lattice(self,names,color,lbl):
prenoms=[unicode(x.strip(),"utf-8") for x in open("prenoms.txt")]
prenoms=[unicode(x.strip(),"utf-8").capitalize() for x in open("prenoms.txt")]
def __init__(self, parent=None):
    QtGui.QWidget.__init__(self, parent)
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)
p1, p2 = self.combis.pop()
while frozenset((p1, p2)) in self.ballots.ballots.keys():
    try:
        p1, p2 = self.combis.pop()
    except IndexError:
p1, p2 = None, None
while p1 is None or frozenset((p1, p2)) in self.ballots.ballots.keys():
    if not self.combis:
def update(self):
    p1, p2 = self.combis.pop()
    while frozenset((p1, p2)) in self.ballots.ballots.keys():
        try:
            p1, p2 = self.combis.pop()
        except IndexError:
            print "Thanks, you are done!"
            QtGui.QApplication.instance().quit()
            sys.exit(0)
    self.ui.prenom1.setText(p1)
    self.ui.prenom2.setText(p2)
QtGui.QApplication.instance().quit()
old_sep,old_count=self.ballots[self.get_couple(ballot)]
d1,d2,old_sep,old_count=self.ballots[self.get_couple(ballot)]
def add(self, ballot):
    winner, sep, other, count = ballot
    winner = winner.capitalize()
    other = other.capitalize()
    if not self.is_in(ballot):
        self.ballots[self.get_couple(ballot)] = (winner, sep, other, count)
    else:
        old_sep, old_count = self.ballots[self.get_couple(ballot)]
        assert(old_sep == sep)
        self.ballots[self.get_couple(ballot)] = (winner, sep, other, old_count + count)
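What a first insertion stores; a hedged Python 2 sketch with made-up names, assuming a Ballots instance whose get_couple() keys ballots by the unordered name pair:

ballots.add((u"alice", u">", u"bob", 1))
# Stored under the couple's key with names capitalized: (u"Alice", u">", u"Bob", 1).
# Adding the same pair again goes through the else branch and accumulates the count.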
return repr(count)+":"+winner+sep+other
return unicode(repr(count))+u":"+winner+sep+other
def ballot_repr(self, ballot):
    winner, sep, other, count = ballot
    return repr(count) + ":" + winner + sep + other
f.write((self.ballot_repr(ballot)+"\n").encode("utf-8"))
f.write((self.ballot_repr(ballot)+u"\n").encode("utf-8"))
def save(self):
    with open(self.filename, "w") as f:
        for ballot in self.ballots.values():
            f.write((self.ballot_repr(ballot) + "\n").encode("utf-8"))
b=(unicode(self.ui.prenom1.text()),"=",unicode(self.ui.prenom2.text()),1)
b=(unicode(self.ui.prenom1.text()),u"=",unicode(self.ui.prenom2.text()),1)
def count_ballot_and_update(self, win):
    if win == 0:
        b = (unicode(self.ui.prenom1.text()), "=", unicode(self.ui.prenom2.text()), 1)
    elif win == 1:
        b = (unicode(self.ui.prenom1.text()), ">", unicode(self.ui.prenom2.text()), 1)
    elif win == 2:
        b = (unicode(self.ui.prenom2.text()), ">", unicode(self.ui.prenom1.text()), 1)
    self.ballots.add(b)
    self.ballots.save()
    self.update()
b=(unicode(self.ui.prenom1.text()),">",unicode(self.ui.prenom2.text()),1)
b=(unicode(self.ui.prenom1.text()),u">",unicode(self.ui.prenom2.text()),1)
b=(unicode(self.ui.prenom2.text()),">",unicode(self.ui.prenom1.text()),1)
b=(unicode(self.ui.prenom2.text()),u">",unicode(self.ui.prenom1.text()),1)
base.__name__,
base.__plain_name__,
def __new__(meta_class, class_name, bases, class_dict, **kw_arguments):
    """
    Create a new type object, for example through a 'class' statement.
    Behaves like a class method and is called before __init__().
    """
    if kw_arguments:
        # Assigning values to the parameters means specializing the
        # template. Therefore, derive a subclass from this meta-class
        # and make it create the actual type object.
        specialized_meta_class = meta_class.__specialize( kw_arguments )

        # Base classes must have the same specialized meta-class.
        specialized_bases = []
        for base in bases:
            if base.__class__ is meta_class:
                specialized_bases.append(
                        specialized_meta_class.__new__(
                                specialized_meta_class,
                                base.__name__,
                                base.__bases__,
                                base.__dict__
                            )
                    )
            else:
                specialized_bases.append( base )

        return specialized_meta_class.__new__(
                       specialized_meta_class,
                       class_name,
                       tuple( specialized_bases ),
                       class_dict
                   )
    else:
        # No specialization. Create a type object.
        extended_name = meta_class.__template_name(
                                class_name,
                                meta_class.__parameters__,
                                meta_class.__parameter_map__
                            )
        extended_dict = meta_class.__parameter_map__.copy()
        extended_dict.update( class_dict )
        extended_dict[ "__plain_name__" ] = class_name

        return type.__new__( meta_class, extended_name, bases, extended_dict )
@param[in] stats The @c pstats.Stats compatible object whose data is to be represented as the new CallGraph.
@param stats The @c pstats.Stats compatible object whose data is to be represented as the new CallGraph.
def __init__(self, stats):
    """
    Constructs a CallGraph from the given @p stats object.

    @param[in] stats The @c pstats.Stats compatible object whose data is
                     to be represented as the new CallGraph.
    """
    # Function -> ( Outgoing Calls, Incoming Calls )
    self.__functions = {}
    # Call -> ( Calling Function, Called Function )
    self.__calls = {}

    # Indexes to look up Functions
    self.__fln_index = {}        # (filename, line number, name) -> Function
    self.__namespace_index = {}  # namespace name -> set of Functions

    self.add(stats)
def __bool__(self):
    """
    Test whether the element is non-zero: return @c True if, and only
    if, it is non-zero. Otherwise return @c False.

    Implicit conversions to boolean (truth) values use this method, for
    example when @c x is an element of a Field:
    @code
    if x:
        do_something()
    @endcode

    @exception NotImplementedError if this method is called; subclasses
                                   must implement this operation.
    """
    raise NotImplementedError

def __eq__(self, other):
    """
    Test whether another element @p other is equal to @p self; return
    @c True if that is the case.

    The infix operator @c == calls this method, for example:
    @code
    if self == other:
        do_something()
    @endcode

    @exception NotImplementedError if this method is called; subclasses
                                   must implement this operation.
    """
    raise NotImplementedError

def __add__(self, other):
    """
    Return the sum of @p self and @p other.

    The infix operator @c + calls this method if @p self is the left
    summand, for example:
    @code
    result = self + other
    @endcode

    @exception NotImplementedError if this method is called; subclasses
                                   must implement this operation.
    """
    raise NotImplementedError

def __neg__(self):
    """
    Return the additive inverse of @p self.

    The unary minus operator @c -x calls this method, for example:
    @code
    negated = -self
    @endcode

    @exception NotImplementedError if this method is called; subclasses
                                   must implement this operation.
    """
    raise NotImplementedError

def __mul__(self, other):
    """
    Return the product of @p self and @p other.

    The infix operator @c * calls this method if @p self is the left
    factor, for example:
    @code
    result = self * other
    @endcode

    @exception NotImplementedError if this method is called; subclasses
                                   must implement this operation.
    """
    raise NotImplementedError

def multiplicative_inverse(self):
    """
    Return the multiplicative inverse of @p self.

    @exception NotImplementedError if this method is called; subclasses
                                   must implement this operation.
    """
    raise NotImplementedError
def __bool__(self): """ Test whether the element is non-zero: return @c True if, and only if, it is non-zero. Otherwise return @c False. Implicit conversions to boolean (truth) values use this method, for example when @c x is an element of a Field: @code if x: do_something() @endcode @exception NotImplementedError if this method is called; subclasses must implement this operation. """ raise NotImplementedError
method, for example:
method; for example:
def __ne__(self, other): """ Test whether another element @p other is different from @p self; return @c True if that is the case. The infix operator @c != calls this method, for example: @code if self != other: do_something() @endcode """ return not self.__eq__( other )
calls this method if @p self is the minuend (left element), for example:
calls this method if @p self is the minuend (left element); for example:
def __sub__(self, other): """ Return the difference of @p self and @p other. The infix operator @c - calls this method if @p self is the minuend (left element), for example: @code result = self - other @endcode """ return self.__add__( -other )
calls this method if @p self is the dividend, for example:
calls this method if @p self is the dividend; for example:
def __truediv__(self, other): """ Return the quotient of @p self and @p other. The infix operator @c / calls this method if @p self is the dividend, for example: @code result = self / other @endcode @exception ZeroDivisionError if @p other is zero. @exception TypeError if @p other lacks the multiplicative_inverse() method and cannot be cast to @p self's class. """ if not other: raise ZeroDivisionError try: other = self.__class__(other) return self.__mul__( other.multiplicative_inverse() ) except TypeError: return NotImplemented
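The __truediv__ row above reduces division to multiplication by `other.multiplicative_inverse()`. A self-contained toy illustration of why that suffices, using a hypothetical prime-field element (the `Fp` class below is not part of the code above; the inverse uses Fermat's little theorem, a**(p-2) == a**(-1) mod p):

class Fp:
    """ Toy element of GF(7), for illustration only. """
    p = 7

    def __init__(self, value):
        self.value = value % self.p

    def __mul__(self, other):
        return Fp( self.value * other.value )

    def multiplicative_inverse(self):
        # Fermat: a**(p-1) == 1 (mod p) for a != 0, so a**(p-2) inverts a.
        return Fp( pow( self.value, self.p - 2, self.p ) )

    def __truediv__(self, other):
        # Same pattern as the row above: divide by multiplying with an inverse.
        return self * other.multiplicative_inverse()

assert (Fp(3) / Fp(5)).value == 2   # check: 2 * 5 == 10 == 3 (mod 7)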
print( "platform: {0}".format( platform.platform() ), file = timing_file ) print( "python: {0}".format( platform.python_version() ), file = timing_file ) print( "wall time (s): {0}".format( wall_time ), file = timing_file ) print( "user time (s): {0}".format( user_time ), file = timing_file ) print( "sys time (s): {0}".format( sys_time ), file = timing_file ) print( "cpu time (s): {0}".format( cpu_time), file = timing_file ) print( "max memory (kB): {0}".format( max_rss ), file = timing_file )
info = [ "node: {0}".format( platform.node() ), "platform: {0}".format( platform.platform() ), "python: {0}".format( platform.python_version() ), "date (Y/M/D h:m:s): {0}".format( datetime.now().strftime("%Y/%m/%d %H:%M:%S") ), "wall time (s): {0}".format( wall_time ), "user time (s): {0}".format( user_time ), "sys time (s): {0}".format( sys_time ), "cpu time (s): {0}".format( cpu_time), "max memory (kB): {0}".format( max_rss ), ]
def dump_data(self, extra_information = {}): if self.is_running(): self.stop() # Yes, this is a race condition. self.__profile_file.close() self.__profile.dump_stats( self.__profile_file.name )
print( "{0}: {1}".format( key, value ), file = timing_file )
info.append( "{0}: {1}".format( key, value ) ) print( "\n".join( info ), file=timing_file )
def dump_data(self, extra_information = {}): if self.is_running(): self.stop() # Yes, this is a race condition. self.__profile_file.close() self.__profile.dump_stats( self.__profile_file.name )
while line and not line.strip() and line.strip().startswith( "#" ):
while line and (not line.strip() or line.strip().startswith( "#" )):
def __iter__(self): # Register: increase the number of parsers with self.__lock() as data: parsers, current_offset, current_line = data self.__update( parsers + 1, current_offset, current_line ) # Iterate until the file ends line = self.__file.readline() while line: with self.__lock() as data: parsers, current_offset, current_line = data self.__file.seek( current_offset ) line = self.__file.readline() current_line += 1
yield current_line, tuple( line.split( self.__separator ) )
yield current_line, tuple( line.strip().split( self.__separator ) )
def __iter__(self): # Register: increase the number of parsers with self.__lock() as data: parsers, current_offset, current_line = data self.__update( parsers + 1, current_offset, current_line ) # Iterate until the file ends line = self.__file.readline() while line: with self.__lock() as data: parsers, current_offset, current_line = data self.__file.seek( current_offset ) line = self.__file.readline() current_line += 1
self.__input = [ ( "<stdin>", [ (0, tuple( arguments ) ) ] ) ]
self.__input = [] if arguments: self.__input.append( ( "<stdin>", [ (0, tuple( arguments ) ) ] ) )
def __init__(self, algorithm, arguments=sys.argv[1:], algorithm_version="<unknown>" ): self.__algorithm = algorithm self.__algorithm_version = algorithm_version options, arguments = self._parse_arguments( arguments, algorithm_version ) # __input is a list of pairs (<name>, <iterable>); # <iterable> is expected to return pairs (<item_number>, <item>). # See run(). self.__input = [ ( "<stdin>", [ (0, tuple( arguments ) ) ] ) ] # Fail early: immediately try to open the file if options.input_file: input_parser = ParallelParser( options.input_file ) self.__input.append( ( options.input_file, input_parser ) ) # Initialize the remaining attributes. self._open_output( options.output_file )
modulo_primes = greedy_prime_factors(
torsion_primes = greedy_prime_factors(
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllipticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
if 2 in modulo_primes:
if 2 in torsion_primes:
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllipticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
modulo_primes.remove( 2 )
torsion_primes.remove( 2 )
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllipticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
for prime in modulo_primes:
for prime in torsion_primes:
def frobenius_trace(curve): """ Compute the trace of the Frobenius endomorphism for the given EllipticCurve @p curve. This is an implementation of Schoof's original algorithm for counting the points of an elliptic curve over a finite field. @return The trace @f$ t @f$ of the Frobenius endomorphism. The number of points on the curve then is @f$ q + 1 - t @f$, where @f$ q @f$ is the size of the finite field over which the curve was defined. """ trace_congruences = [] search_range = hasse_frobenius_trace_range( curve.field() ) modulo_primes = greedy_prime_factors( len(search_range), curve.field().characteristic() ) # To avoid multivariate polynomial arithmetic, make l=2 a special case. if 2 in modulo_primes: trace_congruences.append( frobenius_trace_mod_2( curve ) ) modulo_primes.remove( 2 ) torsion_group = LTorsionGroup( curve ) for prime in modulo_primes: trace_congruences.append( frobenius_trace_mod_l( torsion_group( prime ) ) ) trace_congruence = solve_congruence_equations( trace_congruences ) return representative_in_range( trace_congruence, search_range )
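The trace rows above hand the per-prime results to solve_congruence_equations(), whose body does not appear in this section. A hedged sketch of the Chinese-remainder combination it presumably performs, assuming congruences are (residue, modulus) pairs with pairwise-coprime moduli (the pair representation and the extended_gcd helper are assumptions):

from functools import reduce

def extended_gcd(a, b):
    """ Return (g, x, y) with a*x + b*y == g == gcd(a, b). """
    if b == 0:
        return a, 1, 0
    g, x, y = extended_gcd( b, a % b )
    return g, y, x - (a // b) * y

def solve_congruence_equations(congruences):
    """ Combine pairwise-coprime congruences (residue, modulus) via CRT. """
    def combine(first, second):
        r1, m1 = first
        r2, m2 = second
        g, x, _ = extended_gcd( m1, m2 )   # x is m1's inverse modulo m2
        assert g == 1, "moduli must be coprime"
        modulus = m1 * m2
        residue = ( r1 + (r2 - r1) * x * m1 ) % modulus
        return residue, modulus
    return reduce( combine, congruences )

# t == 2 (mod 3) and t == 3 (mod 5) combine to t == 8 (mod 15).
assert solve_congruence_equations( [ (2, 3), (3, 5) ] ) == (8, 15)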
if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line)
if not os.path.islink(line) and accesscontrollist.hasacl(line) and not options.ignoreacl: err = "filetoversion has a 'deny' in ACL permissions (ls -lde %s: %s) \n \ This program is currently not clever enough to check if you have permission to move/delete this file. \n \ To avoid this problem remove deny permissions from the access control entries \n \ or rerun this command with --ignoreacl" % (line, accesscontrollist.getacl(line)) logging.warn(err) elif not os.path.islink(line): acl = None
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
acl = None
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
acl = removeacl(line) util.move(line, line+".beforesyncher") if acl is not None: accesscontrollist.setacl(line, acl)
acl = accesscontrollist.removeacl(line) util.move(line, line+"-beforesyncher")
def makesymlinks(repospath): reposfilepath = os.path.abspath(repospath) with open(os.path.join(repospath, SYNCHER_DB_FILENAME)) as db: try: for line in db: line = line.strip() if not os.path.islink(line): logging.info("creating symlink from %s to %s", reposfilepath + line, line) if not options.dry: if os.path.exists(line): acl = None if options.ignoreacl: acl = removeacl(line) util.move(line, line+".beforesyncher")#repospathtoputnewfilein) if acl is not None: accesscontrollist.setacl(line, acl) elif not os.path.exists(os.path.dirname(line)): created = util.makedirs(os.path.dirname(line)) util.symlink(reposfilepath + line, line) else: if not os.path.realpath(line) == reposfilepath + line: logging.warn("%s is already a symbolic link to %s not %s. it will not be followed and linked properly to repository" % (line, os.path.realpath(line), reposfilepath + line)) except Exception as e: logging.warn("ROLLING BACK because of %s" % e) undo.rollback() raise
if self.val.type.code == gdb.TYPE_CODE_RANGE:
if self.val['code'] == gdb.TYPE_CODE_RANGE:
def to_string(self): """Return a pretty-printed image of our main_type. """ fields = [] fields.append("name = %s" % self.val['name']) fields.append("tag_name = %s" % self.val['tag_name']) fields.append("code = %s" % self.val['code']) fields.append("flags = [%s]" % self.flags_to_string()) fields.append("owner = %s" % self.owner_to_string()) fields.append("target_type = %s" % self.val['target_type']) fields.append("vptr_basetype = %s" % self.val['vptr_basetype']) if self.val['nfields'] > 0: for fieldno in range(self.val['nfields']): fields.append(self.struct_field_img(fieldno)) if self.val.type.code == gdb.TYPE_CODE_RANGE: fields.append(self.bounds_img()) fields.append(self.type_specific_img())
<<<<<<< HEAD
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
======= >>>>>>> e5a5e37cb95fd2b91a757284cc694e01ea9da987
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
======= name="Nfo_" + media.name results.Append(MetadataSearchResult(id=media.id,name=name,year=3000,lang=lang,score=100)) >>>>>>> e5a5e37cb95fd2b91a757284cc694e01ea9da987
def search(self, results, media, lang): Log("Searching") fname=Media id=media.name pageUrl="http://localhost:32400/library/metadata/" + media.id page=HTTP.Request(pageUrl) Log(media.primary_metadata) nfoXML = XML.ElementFromURL(pageUrl).xpath('//MediaContainer/Video/Media/Part')[0] path1=nfoXML.get('file') path = os.path.dirname(path1) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=path1.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: media.id = nfoXML.xpath('./id')[0].text except: pass
<<<<<<< HEAD
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director
======= metadata.directors.clear() try: metadata.directors.add(nfoXML.xpath("director")[0].text) except: pass >>>>>>> e5a5e37cb95fd2b91a757284cc694e01ea9da987
def scrapeNfo(self, metadata, media, lang): Log("all your datas are belong to us") Log('UPDATE: ' + media.items[0].parts[0].file) path = os.path.dirname(media.items[0].parts[0].file) id=media.title nfoFile='' Log(path) if os.path.exists(path): for f in os.listdir(path): if f.split(".")[-1].lower() == "nfo": nfoName=f.split(".")[0] fname1=media.items[0].parts[0].file.split("/")[-1] fname2=fname1.split(".")[0] if fname2.lower() == "the": fname2=fname2+"." + fname1.split(".")[1] if len(fname2) < len(nfoName): nfoName2=nfoName[len(fname2)] else: nfoName2=nfoName Log(len(fname2)) Log(nfoName2) if fname2==nfoName2: nfoFile = os.path.join(path, f) nfoText = Core.storage.load(nfoFile) nfoTextLower = nfoText.lower() if nfoTextLower.count('<movie>') > 0 and nfoTextLower.count('</movie>') > 0: #likely an xbmc nfo file nfoXML = XML.ElementFromString(nfoText).xpath('//movie')[0] #title try: metadata.title = nfoXML.xpath('./title')[0].text except: pass #summary try: metadata.summary = nfoXML.xpath('./plot')[0].text except: pass #year try: metadata.year = int(nfoXML.xpath("year")[0].text) except: pass #rating try: metadata.rating = float(nfoXML.xpath('./rating')[0].text) except: pass Log(metadata.rating) #content rating try: metadata.content_rating = nfoXML.xpath('./mpaa')[0].text except: pass #director
======= metadata.genres.clear() Log("cleared genres") for r in genres: Log(r) metadata.genres.add(r) Log(metadata.genres) metadata.roles.clear() for actor in nfoXML.findall('./actor'): role = metadata.roles.new() try: role.role = actor.xpath("role")[0].text except: pass try: role.actor = actor.xpath("name")[0].text except: pass try: role.photo = actor.xpath("thumb")[0].text except: pass if role.photo != 'None': data = HTTP.Request(actor.xpath("thumb")[0].text) Log('Added Thumbnail for: ' + role.actor) name = metadata.title if name not in metadata.posters: metadata.posters[name] = Proxy.Media(data) break else: continue Log("++++++++++++++++++++++++") Log("Movie nfo Information") Log("++++++++++++++++++++++++") Log("Title: " + str(metadata.title)) Log("id: " + str(metadata.guid)) Log("Summary: " + str(metadata.summary)) Log("Year: " + str(metadata.year)) Log("IMDB rating: " + str(metadata.rating)) Log("Content Rating: " + str(metadata.content_rating)) Log("Director " + str(metadata.directors)) Log("Studio: " + str(metadata.studio)) Log("Duration: " + str(metadata.duration)) Log("Genres") for r in metadata.genres: Log("genres: " + r) Log(metadata.id) Log("++++++++++++++++++++++++") return id, metadata >>>>>>> e5a5e37cb95fd2b91a757284cc694e01ea9da987
def grabPoster(pUrl=thumb.text, i=i): posterUrl = pUrl Log("Adding: " + pUrl) thumbpic = HTTP.Request(pUrl) metadata.posters[posterUrl] = Proxy.Preview(thumbpic, sort_order = i)
return _('''
if mem['SwapTotal'] == 0: swap_message = _("no virtual memory installed.") else: swap_message = \ _("Virtual memory: %.2f Gb total, <strong>%.0f%% free</strong>.") % \ (mem['SwapTotal'], (mem['SwapFree'] * 100.0 / mem['SwapTotal'])) return (_('''
def compute_mem(line, x): #logging.debug(line[0:-1] + " -> " + re.split('\W+', line)[1])
Virtual memory: %.2f Gb total, <strong>%.0f%% free</strong>. ''') % (s, cpus, model, mem['MemTotal'], (mem['Inactive'] + mem['Active']), mem['Cached'], mem['Buffers'], mem['SwapTotal'], (mem['SwapFree'] * 100.0 / mem['SwapTotal']) )
%s''') % (s, cpus, model, mem['MemTotal'], (mem['Inactive'] + mem['Active']), mem['Cached'], mem['Buffers'], swap_message))
def compute_mem(line, x): #logging.debug(line[0:-1] + " -> " + re.split('\W+', line)[1])
LicornMessage(data=text_message),
LicornMessage(data=text_message, channel=1),
def output(self, text_message): return current_thread().listener.process( LicornMessage(data=text_message), options.msgproc.getProxy())
self.reload()
self.reload(full=False)
def __init__(self, configuration): """ Create the user accounts list from the underlying system. """
def reload(self):
def reload(self, full=True):
def reload(self): """ Load (or reload) the data structures from the system data. """
users.reload()
def main(uri, http_user, sort = "login", order = "asc"): """ display all users in a nice HTML page. """ start = time.time() groups.reload() users.reload() # profiles.reload() u = users.users g = groups.groups p = profiles.profiles groups.Select(filters.PRIVILEGED) pri_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.RESPONSIBLE) rsp_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.GUEST) gst_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] groups.Select(filters.STANDARD) std_grps = [ g[gid]['name'] for gid in groups.filtered_groups ] accounts = {} ordered = {} totals = {} prof = {} for profile in p: prof[groups.name_to_gid(profile)] = p[profile] totals[p[profile]['name']] = 0 totals[_('Standard account')] = 0 title = _("User accounts") data = w.page_body_start(uri, http_user, ctxtnav, title) if order == "asc": reverseorder = "desc" else: reverseorder = "asc" data += '<table>\n <tr>\n' for (sortcolumn, sortname) in ( ("gecos", _("Full name")), ("login", _("Identifier")), ("profile", _("Profile")), ("locked", _("Locked")) ): if sortcolumn == sort: data += ''' <th><img src="/images/sort_%s.gif" alt="%s order image" />&#160; <a href="/users/list/%s/%s" title="%s">%s</a> </th>\n''' % (order, order, sortcolumn, reverseorder, _("Click to sort in reverse order."), sortname) else: data += ''' <th><a href="/users/list/%s/asc" title="%s">%s</a></th>\n''' % (sortcolumn, _("Click to sort on this column."), sortname) data += ' </tr>\n' def html_build_compact(index, accounts = accounts): uid = ordered[index] login = u[uid]['login'] edit = (_('''<em>Click to edit current user account parameters:</em> <br /> UID: <strong>%d</strong><br /> GID: %d (primary group <strong>%s</strong>)<br /><br /> Groups:&#160;<strong>%s</strong><br /><br /> Privileges:&#160;<strong>%s</strong><br /><br /> Responsibilities:&#160;<strong>%s</strong><br /><br /> Invitations:&#160;<strong>%s</strong><br /><br /> ''') % ( uid, u[uid]['gidNumber'], g[u[uid]['gidNumber']]['name'], ", ".join(filter(lambda x: x in std_grps, u[uid]['groups'])), ", ".join(filter(lambda x: x in pri_grps, u[uid]['groups'])), ", ".join(filter(lambda x: x in rsp_grps, u[uid]['groups'])), ", ".join(filter( lambda x: x in gst_grps, u[uid]['groups'])))).replace( '<','&lt;').replace('>','&gt;') html_data = ''' <tr class="userdata"> <td class="paddedleft"> <a href="/users/edit/%s" title="%s" class="edit-entry">%s</a> </td> <td class="paddedright"> <a href="/users/edit/%s" title="%s" class="edit-entry">%s</a> </td> <td style="text-align:center;">%s</td> ''' % (login, edit, u[uid]['gecos'], login, edit, login, accounts[uid]['profile_name']) if u[uid]['locked']: html_data += ''' <td class="user_action_center"> <a href="/users/unlock/%s" title="%s"> <img src="/images/16x16/locked.png" alt="%s"/></a> </td> ''' % (login, _("Unlock password (re-grant access to machines)."), _("Remove account.")) else: html_data += ''' <td class="user_action_center"> <a href="/users/lock/%s" title="%s"> <img src="/images/16x16/unlocked.png" alt="%s"/></a> </td> ''' % (login, _("Lock password (revoke access to machines)."), _("Lock account.")) html_data += ''' <td class="user_action"> <a href="/users/skel/%s" title="%s" class="reapply-skel"> <span class="delete-entry">&nbsp;&nbsp;&nbsp;&nbsp;</span></a> </td> <td class="user_action"> <a href="/users/delete/%s" title="%s" class="delete-entry"> <span class="delete-entry">&nbsp;&nbsp;&nbsp;&nbsp;</span></a> </td> </tr> ''' % (login, _('''Reapply origin skel data in the personal ''' '''directory of user. This is useful''' ''' when user has lost icons, or modified too much his/her ''' '''desktop (menus, panels and so on). This will get all his/her desktop back.'''), login, _("Definitely remove account from the system.")) return html_data users.Select(filters.STANDARD) for uid in users.filtered_users: user = u[uid] login = user['login'] # we add the login to gecosValue and lockedValue to be sure to obtain # unique values. This prevents problems with empty or non-unique GECOS # and when sorting on locked status (accounts would be overwritten and # lost because sorting must be done on unique values). accounts[uid] = { 'login' : login, 'gecos' : user['gecos'] + login , 'locked' : str(user['locked']) + login } try: p = prof[user['gidNumber']]['name'] except KeyError: p = _("Standard account") accounts[uid]['profile'] = "%s %s" % ( p, login ) accounts[uid]['profile_name'] = p totals[p] += 1 # index on the column chosen for sorting, and keep trace of the uid # to find account data back after ordering. ordered[hlstr.validate_name(accounts[uid][sort])] = uid memberkeys = ordered.keys() memberkeys.sort() if order == "desc": memberkeys.reverse() data += ''.join(map(html_build_compact, memberkeys)) def print_totals(totals): output = "" for total in totals: if totals[total] != 0: output += ''' <tr class="list_total"> <td colspan="3" class="total_left">%s</td> <td colspan="3" class="total_right">%d</td> </tr> ''' % (_("number of <strong>%s</strong>:") % total, totals[total]) return output data += ''' <tr> <td colspan="6">&#160;</td></tr> %s <tr class="list_total"> <td colspan="3" class="total_left">%s</td> <td colspan="3" class="total_right">%d</td> </tr>
self.current_target_object, self.args, self.kwargs
self.current_target, self.args, self.kwargs
def dump_status(self, long_output=False, precision=None): """ get detailed thread status. """
self.current_target_object) \
self.current_target) \
def dump_status(self, long_output=False, precision=None): """ get detailed thread status. """
execute([ 'sudo', 'rm', '-rf', '%s/*' % configuration.home_backup_dir, '%s/*' % configuration.home_archive_dir ]) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi'])
for directory in ( configuration.home_backup_dir, configuration.home_archive_dir ): clean_dir_contents(directory) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi'])
def clean_system(): """ Remove all stuff to make the system clean, testsuite-wise.""" test_message('''cleaning system from previous runs.''') # delete them first in case of a previous failed testsuite run. # don't check exit codes or such, this will be done later. for argument in ( ['user', '''toto,tutu,tata,titi,test,utilisager.normal,''' \ '''test.responsibilly,utilicateur.accentue,user_test,''' \ '''grp-acl-user,utest_267,user_test2,user_test3,user_testsys,''' \ '''user_testsys2,user_testsys3''', '--no-archive'], ['profile', '''--group=utilisagers,responsibilisateurs,''' '''profil_test''', '--del-users', '--no-archive'], ['group', '''test_users_A,test_users_B,groupeA,B-Group_Test,''' \ '''groupe_a_skel,ACL_tests,MOD_tests,SYSTEM-test,SKEL-tests,''' \ '''ARCHIVES-test,group_test,group_testsys,group_test2,''' '''group_test3,GRP-ACL-test,gtest_267,group_testsys''' ], ['privilege', '--name=group_test' ] ): execute(DEL + argument) execute([ 'sudo', 'rm', '-rf', '%s/*' % configuration.home_backup_dir, '%s/*' % configuration.home_archive_dir ]) execute(ADD + ['group', '--system', 'acl,admins,remotessh,licorn-wmi']) test_message('''system cleaned from previous testsuite runs.''')
ScenarioTest([ [ 'sudo', 'rm', '-vrf', '%s/*' % configuration.home_archive_dir ],
clean_dir_contents(configuration.home_archive_dir) ScenarioTest([
def chk_acls_cmds(group, subdir=None): return [ 'sudo', 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names['plural'], group, '/%s' % subdir if subdir else '') ]
[ 'sudo', 'getfacl', '-R', configuration.home_archive_dir ], [ 'sudo', 'rm', '-vrf', '%s/*' % configuration.home_archive_dir ]
[ 'sudo', 'getfacl', '-R', configuration.home_archive_dir ]
def chk_acls_cmds(group, subdir=None): return [ 'sudo', 'getfacl', '-R', '%s/%s/%s%s' % ( configuration.defaults.home_base_path, configuration.groups.names['plural'], group, '/%s' % subdir if subdir else '') ]
logging.warning( 'Adding a default profile on the system (this is mandatory).')
logging.warning('''Adding a default %s profile on the system ''' '''(this is mandatory).''' % styles.stylize(styles.ST_NAME, 'Users'))
def checkDefaultProfile(self): """If no profile exists on the system, create a default one with system group "users"."""
lenghts[0], lenghts[1], lenghts[2], lenghts[3], lenghts[4], lenghts[5], lenghts[6], lenghts[7]
lengths[0], lengths[1], lengths[2], lengths[3], lengths[4], lengths[5], lengths[6], lengths[7]
def run(self): logging.progress("%s: thread running." % (self.name)) Thread.run(self)
if(lenghts[7] > 0):
if(lengths[7] > 0):
def run(self): logging.progress("%s: thread running." % (self.name)) Thread.run(self)
dest = list(user['groups'].copy())
dest = list(user['groups'][:])
def edit(uri, http_user, login): """Edit a user account, based on login.""" users.reload() groups.reload() # profiles.reload() title = _('Edit account %s') % login if protected_user(login): return w.forgery_error(title) data = w.page_body_start(uri, http_user, ctxtnav, title, False) try: user = users.users[users.login_to_uid(login)] try: profile = \ profiles.profiles[ groups.groups[user['gidNumber']]['name'] ]['name'] except KeyError: profile = _("Standard account") dbl_lists = {} for filter, titles, id in groups_filters_lists_ids: groups.Select(filter) dest = list(user['groups'].copy()) source = [ groups.groups[gid]['name'] \ for gid in groups.filtered_groups ] for current in dest[:]: try: source.remove(current) except ValueError: dest.remove(current) dest.sort() source.sort() dbl_lists[filter] = w.doubleListBox(titles, id, source, dest) form_name = "user_edit_form" data += '''<div id="edit_form">
count += 1
def fork_wmi(opts, start_wmi = True): """ Start the Web Management Interface (fork it). """ # FIXME: implement start_wmi in argparser module. try: if os.fork() == 0: # FIXME: drop_privileges() → become setuid('licorn:licorn') process.write_pid_file(wpid_path) if opts.daemon: process.use_log_file(wlog_path) pname = '%s/wmi' % dname process.set_name(pname) logging.progress("%s: starting (pid %d)." % (pname, os.getpid())) setup_signals_handler(pname) if opts.wmi_listen_address: # the CLI launch argument has priority over the configuration # directive, for testing purposes. listen_address = opts.wmi_listen_address elif configuration.daemon.wmi.listen_address: listen_address = configuration.daemon.wmi.listen_address else: # the fallback is localhost listen_address = 'localhost' if listen_address.startswith('if:') \ or listen_address.startswith('iface:') \ or listen_address.startswith('interface:'): raise NotImplementedError( 'getting interface address is not yet implemented.') logging.progress('%s: bind on listen address %s.' % ( pname, styles.stylize(styles.ST_ADDRESS, listen_address))) count = 0 while True: # try creating an http server. # if it fails because of socket already in use, just retry # forever, displaying a message every second. # # when creation succeeds, break the loop and serve requests. count += 1 try: httpd = TCPServer((listen_address, wmi_port), WMIHTTPRequestHandler) break except socket.error, e: if e[0] == 98: logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count)) time.sleep(1) else: logging.error("%s/wmi: socket error %s." % (dname, e)) return httpd.serve_forever() except OSError, e: logging.error("%s/wmi: fork failed: errno %d (%s)." % (dname, e.errno, e.strerror)) except KeyboardInterrupt: logging.warning('%s/wmi: terminating on interrupt signal.' % dname) raise SystemExit
logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count))
logging.warning("%s/wmi: socket already in use. waiting (total: %ds)." % (dname, count)) count += 1
def fork_wmi(opts, start_wmi = True): """ Start the Web Management Interface (fork it). """ # FIXME: implement start_wmi in argparser module. try: if os.fork() == 0: # FIXME: drop_privileges() → become setuid('licorn:licorn') process.write_pid_file(wpid_path) if opts.daemon: process.use_log_file(wlog_path) pname = '%s/wmi' % dname process.set_name(pname) logging.progress("%s: starting (pid %d)." % (pname, os.getpid())) setup_signals_handler(pname) if opts.wmi_listen_address: # the CLI launch argument has priority over the configuration # directive, for testing purposes. listen_address = opts.wmi_listen_address elif configuration.daemon.wmi.listen_address: listen_address = configuration.daemon.wmi.listen_address else: # the fallback is localhost listen_address = 'localhost' if listen_address.startswith('if:') \ or listen_address.startswith('iface:') \ or listen_address.startswith('interface:'): raise NotImplementedError( 'getting interface address is not yet implemented.') logging.progress('%s: bind on listen address %s.' % ( pname, styles.stylize(styles.ST_ADDRESS, listen_address))) count = 0 while True: # try creating an http server. # if it fails because of socket already in use, just retry # forever, displaying a message every second. # # when creation succeeds, break the loop and serve requests. count += 1 try: httpd = TCPServer((listen_address, wmi_port), WMIHTTPRequestHandler) break except socket.error, e: if e[0] == 98: logging.warning("%s/wmi: socket already in use. waiting (total: %dsec)." % (dname, count)) time.sleep(1) else: logging.error("%s/wmi: socket error %s." % (dname, e)) return httpd.serve_forever() except OSError, e: logging.error("%s/wmi: fork failed: errno %d (%s)." % (dname, e.errno, e.strerror)) except KeyboardInterrupt: logging.warning('%s/wmi: terminating on interrupt signal.' % dname) raise SystemExit
def check_dirs_and_contents_perms_and_acls_new(dirs_infos, batch=None,
def check_dirs_and_contents_perms_and_acls_new(dirs_infos, batch=False,
def check_dirs_and_contents_perms_and_acls_new(dirs_infos, batch=None, auto_answer=None): """ general function to check file/dir """ def check_one_dir_and_acl(dir_info, batch=batch, auto_answer=auto_answer): all_went_ok = True # save desired user and group owner of the file/dir try: if dir_info.user: uid = dir_info['user'] else: uid = -1 if dir_info.group and dir_info.group != '': gid = dir_info['group'] else: gid = -1 except KeyError, e: raise exceptions.LicornRuntimeError('''You just encountered a ''' '''programmer bug. Get in touch with robin@licorn.org (was: ''' '''%s).''' % e) except exceptions.LicornRuntimeException, e: raise exceptions.LicornRuntimeError('''The uid/gid you want to ''' '''check against does not exist on this system ! This ''' '''shouldn't happen and is probably a programmer/packager ''' '''bug. Get in touch with dev@licorn.org (was: %s).''' % e) # Does the file/dir exist ? try: entry_stat = os.lstat(dir_info['path']) except OSError, e: if e.errno == 13: raise exceptions.InsufficientPermissionsError(str(e)) elif e.errno == 2: raise exceptions.DoesntExistsException(str(e)) else: # FIXME: do more things to recover from more system errors… raise e # if it is a file if ( entry_stat.st_mode & 0170000 ) == S_IFREG: logging.progress("Checking file %s…" % stylize(ST_PATH, dir_info['path'])) if dir_info.files_perm and dir_info.user \ and dir_info.group: check_perms( file_type=S_IFREG, dir_info=dir_info, batch=batch) # if it is a dir elif ( entry_stat.st_mode & 0170000 ) == S_IFDIR: logging.progress("Checking dir %s…" % stylize(ST_PATH, dir_info['path'])) # if the directory ends with '/' that means that we will only # affect the content of the dir. # the dir itself will receive default licorn ACL rights (those # defined in the configuration) if dir_info.path[-1] == '/': dir_info_root = dir_info.copy() dir_info_root.root_dir_acl = True dir_info_root.root_dir_perm = "%s,g:%s:rwx,%s" % ( LMC.configuration.acls.acl_base, LMC.configuration.defaults.admin_group, LMC.configuration.acls.acl_mask) dir_info_root.group = "acl" # now that the "root dir" has its special treatment, # prepare dir_info for the rest (its contents) dir_info.path = dir_info.path[:-1] else: dir_info_root = dir_info logging.progress("Checking %s's %s…" % ( stylize(ST_PATH, dir_info['path']), "ACLs" if dir_info.root_dir_acl else "posix perms")) # deal with root dir check_perms( is_root_dir=True, file_type=S_IFDIR, dir_info=dir_info_root, batch=batch) if dir_info.files_perm != None or dir_info.dirs_perm != None: try: exclude_list = dir_info.exclude except AttributeError : exclude_list = [] if dir_info.files_perm != None: logging.progress("Checking %s's contents %s…" % ( stylize(ST_PATH, dir_info['path']), 'ACLs' if dir_info.content_acl else 'posix perms')) if dir_info.dirs_perm != None: dir_path = dir_info['path'] for dir in minifind(dir_path, exclude=exclude_list, mindepth=1, type=S_IFDIR): dir_info.path=dir check_perms( file_type=S_IFDIR, dir_info=dir_info, batch=batch) # deal with files inside root dir for file in minifind(dir_path, exclude=exclude_list, mindepth=1, type=S_IFREG): dir_info.path = file check_perms( file_type=S_IFREG, dir_info=dir_info, batch=batch) else: logging.warning('''The type of %s is not recognised by the ''' '''check_user() function.''' % dir_info['path']) return all_went_ok if dirs_infos != None: # first, check user_home try: check_one_dir_and_acl(dirs_infos._default) except AttributeError: pass # check all specials_dirs for dir_info in dirs_infos: if check_one_dir_and_acl(dir_info) is False: return False else: return True else: raise exceptions.BadArgumentError( "You must pass something through dirs_infos to check!")
if daemon.cmdlistener.role == licornd_roles.SERVER:
if LMC.configuration.licornd.role == licornd_roles.SERVER:
def acceptHost(self, daemon, connection): """ Very basic check for the connection. """ client_addr, client_socket = connection.addr
logging.warning('''%s: socket already in use. ''' '''waiting (total: %ds).''' % (self.name, count))
logging.warning('''%s: %s. ''' '''waiting (total: %ds).''' % (self.name, e, count))
def run(self): assert ltrace('thread', '%s running' % self.name)
self.pyro_daemon.shutdown(True)
def run(self): assert ltrace('thread', '%s running' % self.name)
return (strip_moving_data(output), retcode)
def RunCommand(self, cmdnum, batch=False):
ScenarioTest(commands, descr="integrated help").Run()
ScenarioTest(commands, descr='''test integrated help of all CLI commands''' ).Run()
def test_integrated_help(): """Test extensively argparser contents and integrated help.""" commands = [] for program in (GET, ADD, MOD, DEL, CHK): commands.extend([ program + ['-h'], program + ['--help']]) if program == ADD: modes = [ 'user', 'users', 'group', 'profile' ] elif program == MOD: modes = [ 'configuration', 'user', 'group', 'profile' ] elif program == DEL: modes = [ 'user', 'group', 'groups', 'profile' ] elif program == GET: modes = [ 'user', 'users', 'passwd', 'group', 'groups', 'profiles', 'configuration' ] elif program == CHK: modes = [ 'user', 'users', 'group', 'groups', 'profile', 'profiles', 'configuration' ] for mode in modes: if program == GET and mode == 'configuration': commands.append(program + [ mode ]) else: commands.extend([ program + [ mode, '-h' ], program + [ mode, '--help' ] ]) ScenarioTest(commands, descr="integrated help").Run()