Columns (string lengths):
  rem      string, lengths 0 to 322k
  add      string, lengths 0 to 2.05M
  context  string, lengths 8 to 228k
("type", "B"),
("mode", "I"), ("__pad0", "4x"),
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
("links", "I"),)
("nlink", "I"), ("uid", "I"), ("gid", "I"), ("rdev", "Q"), ("atime", "I"), ("atimens", "I"), ("mtime", "I"), ("mtimens", "I"), ("ctime", "I"), ("ctimens", "I"), ("__pad1", "68x"),) def get_st_times(self): return dict(( ("st_" + a, self[a] + self[a + "ns"] / (10 ** 9)) for a in (b + "time" for b in "amc"))) assert Inode.size == 128, Inode.size
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
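The Inode rows above fold split second/nanosecond counters into the float st_* timestamps that stat expects. A minimal standalone sketch of the same combination, assuming a plain dict keyed like the struct fields shown (nothing beyond that is implied):

def combine_times(fields):
    # Combine "Xtime" seconds and "Xtimens" nanoseconds into float
    # timestamps, as Inode.get_st_times does above.
    return dict(
        ("st_" + key, fields[key] + fields[key + "ns"] / 1e9)
        for key in ("atime", "mtime", "ctime")
    )

# combine_times({"atime": 10, "atimens": 500000000,
#                "mtime": 0, "mtimens": 0,
#                "ctime": 0, "ctimens": 0})["st_atime"] == 10.5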
assert BootRecord.size <= 256
assert BootRecord.size <= 256, "This will clobber the root directory entry"
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0)
def __init__(self, path): self.f = open(path, "r+b")
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0) br = BootRecord.from_fileobj(self.f) assert br["ident"].rstrip("\0") == "clfs", repr(br["ident"]) assert br["version"] == 1 self.cluster_size = br["clrsize"] self.master_region_cluster_count = br["mstrclrs"] self.allocation_table_cluster_count = br["atabclrs"] self.data_region_cluster_count = br["dataclrs"] self.filesystem_cluster_count = \ self.master_region_cluster_count + \ self.allocation_table_cluster_count + \ self.data_region_cluster_count
self.filesystem_cluster_count = \ self.master_region_cluster_count + \
@property def filesystem_cluster_count(self): return self.master_region_cluster_count + \
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0) br = BootRecord.from_fileobj(self.f) assert br["ident"].rstrip("\0") == "clfs", repr(br["ident"]) assert br["version"] == 1 self.cluster_size = br["clrsize"] self.master_region_cluster_count = br["mstrclrs"] self.allocation_table_cluster_count = br["atabclrs"] self.data_region_cluster_count = br["dataclrs"] self.filesystem_cluster_count = \ self.master_region_cluster_count + \ self.allocation_table_cluster_count + \ self.data_region_cluster_count
if inode_struct["type"] != TYPE_DIRECTORY:
if not S_ISDIR(inode_struct["mode"]):
def read_directory(self, inode): inode_struct = self.get_inode_struct(inode) if inode_struct["type"] != TYPE_DIRECTORY: raise ClfsError(ENOTDIR) offset = 0 while offset < inode_struct["size"]: dirent = DirEntry.unpack(self.read_inode_data( inode, offset, DirEntry.size)) if dirent["name"].rstrip("\0"): yield dirent offset += dirent.size
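read_directory above walks fixed-size DirEntry records at increasing offsets and skips blank names. A minimal sketch of that record walk over a raw byte string; the 32-byte-name-plus-uint layout here is purely illustrative, not the real DirEntry format:

import struct

RECORD = struct.Struct("<32sI")  # illustrative layout: name field plus an inode number

def iter_records(blob):
    # Yield (name, ino) for every non-blank fixed-size record in the blob,
    # mirroring the offset loop in read_directory above.
    for off in range(0, len(blob) - RECORD.size + 1, RECORD.size):
        name, ino = RECORD.unpack_from(blob, off)
        name = name.rstrip(b"\0")
        if name:
            yield name, ino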
for dirent in self.read_directory(cur_dirent["inode"]):
for dirent in self.read_directory(cur_dirent["ino"]):
def get_dir_entry(self, path): for name in path.split("/"): if not name: cur_dirent = self.get_root_dir_entry() else: # pdb.set_trace() for dirent in self.read_directory(cur_dirent["inode"]): if dirent["name"].rstrip("\0") == name: cur_dirent = dirent break else: raise ClfsError(ENOENT) return cur_dirent
def iter_allocation_table(self): self.seek_cluster(self.master_region_cluster_count) for index in xrange(self.data_region_cluster_count): yield struct.unpack("I", self.f.read(4))[0] def claim_free_cluster(self): self.seek_cluster(self.master_region_cluster_count) for index in xrange(self.data_region_cluster_count): cluster = struct.unpack("I", self.f.read(4))[0] if cluster == CLUSTER_FREE: self.f.seek(-4, os.SEEK_CUR) self.f.write(struct.pack("I", CLUSTER_END_OF_CHAIN)) return index + self.master_region_cluster_count + self.allocation_table_cluster_count else: assert False, "Filesystem is full?" def first_data_region_cluster_number(self): return self.master_region_cluster_count + self.allocation_table_cluster_count def valid_data_region_cluster_number(self, clno): return self.first_data_region_cluster_number() \ <= clno \ < self.filesystem_cluster_count def seek_cluster_number(self, clno): assert self.valid_data_region_cluster_number(clno), clno self.safe_seek(self.cluster_size * self.master_region_cluster_count + 4 * (clno - self.first_data_region_cluster_number())) def set_cluster_number(self, clno, value): self.seek_cluster_number(clno) logging.debug("Setting cluster number %i->%i", clno, value) self.f.write(struct.pack("I", value))
dirent_for_path = get_dir_entry
def get_dir_entry(self, path): for name in path.split("/"): if not name: cur_dirent = self.get_root_dir_entry() else: # pdb.set_trace() for dirent in self.read_directory(cur_dirent["inode"]): if dirent["name"].rstrip("\0") == name: cur_dirent = dirent break else: raise ClfsError(ENOENT) return cur_dirent
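claim_free_cluster above scans the allocation table for a CLUSTER_FREE entry, rewrites it as CLUSTER_END_OF_CHAIN and returns the corresponding cluster number. A minimal in-memory sketch of that scan; the sentinel values are assumptions, since their definitions are not shown here:

CLUSTER_FREE = 0                  # assumed sentinel values
CLUSTER_END_OF_CHAIN = 0xFFFFFFFF

def claim_free_slot(table):
    # Find the first free slot in a FAT-like table (a list of entries),
    # mark it as the end of a new chain, and return its index.
    for index, entry in enumerate(table):
        if entry == CLUSTER_FREE:
            table[index] = CLUSTER_END_OF_CHAIN
            return index
    raise RuntimeError("allocation table is full")

# table = [5, 0, 7]; claim_free_slot(table) -> 1; table becomes [5, 0xFFFFFFFF, 7]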
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode)
def write_inode_data(self, ino, offset, buffer): inode_struct = self.get_inode_struct(ino)
#def write(self, path, buf, offset):
inode,
ino,
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode) data_offset = inode_struct.size write_size, new_size = self.write_to_chain( inode, inode_struct["size"] + data_offset, offset + data_offset, buffer) assert write_size == len(buffer), write_size expected_size = data_offset + max(offset + write_size, inode_struct["size"]) assert new_size == expected_size, (new_size, expected_size) #assert new_size == # just update it anyway, i'll have to update times too inode_struct["size"] = new_size - data_offset #pdb.set_trace() assert (inode_struct.size, new_size) == self.write_to_chain( inode, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(inode)["size"] == new_size - data_offset return write_size
inode, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(inode)["size"] == new_size - data_offset
ino, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(ino)["size"] == new_size - data_offset
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode) data_offset = inode_struct.size write_size, new_size = self.write_to_chain( inode, inode_struct["size"] + data_offset, offset + data_offset, buffer) assert write_size == len(buffer), write_size expected_size = data_offset + max(offset + write_size, inode_struct["size"]) assert new_size == expected_size, (new_size, expected_size) #assert new_size == # just update it anyway, i'll have to update times too inode_struct["size"] = new_size - data_offset #pdb.set_trace() assert (inode_struct.size, new_size) == self.write_to_chain( inode, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(inode)["size"] == new_size - data_offset return write_size
def create_node(self, path, type):
def create_node(self, path, mode): """Create and allocate a new inode, update relevant structures elsewhere"""
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster()) # write the new dirent at the end of the parent directory assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) # initialize the new inode #pdb.set_trace() new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack())
parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster())
create_rootdir = bool( (not node_basename) and (node_dirname == parent_dirname == "/")) if create_rootdir: assert S_ISDIR(mode) new_inode = Inode(size=0, uid=0, gid=0, rdev=0, mode=mode) sec, nsec = time_as_posix_spec(time()) for field_name in ("atime", "mtime", "ctime"): new_inode[field_name] = sec for field_name in ("atimens", "mtimens", "ctimens"): new_inode[field_name] = nsec del sec, nsec if S_ISDIR(mode): new_inode["nlink"] = 2 else: new_inode["nlink"] = 1 new_dirent = DirEntry(ino=self.claim_free_cluster()) if create_rootdir: new_dirent["name"] = "/" assert new_dirent["ino"] == self.first_data_region_cluster_number, new_dirent["ino"] else: parent_ino = self.dirent_for_path(node_dirname)["ino"] for sibling_dirent in self.read_directory(parent_ino): if sibling_dirent["name"] == node_basename: raise ClfsError(EEXIST) else: new_dirent["name"] = node_basename assert (new_inode.size, new_inode.size) == self.write_to_chain( cluster=new_dirent["ino"], size=0, offset=0, buffer=new_inode.pack())
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster()) # write the new dirent at the end of the parent directory assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) # initialize the new inode #pdb.set_trace() new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack())
assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack()) def create_filesystem(device_path): device_file = open(device_path, "r+b") device_size = os.fstat(device_file.fileno()).st_size
if create_rootdir: self.seek_root_dirent() self.f.write(new_dirent.pack()) else: assert new_dirent.size == self.write_inode_data( ino=parent_ino, offset=self.get_inode_struct(parent_ino)["size"], buffer=new_dirent.pack(),) def generate_bootrecord(device_size):
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster()) # write the new dirent at the end of the parent directory assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) # initialize the new inode #pdb.set_trace() new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack())
gtk.STOCK_NEW, gtk.RESPONSE_OK,
gtk.STOCK_OK, gtk.RESPONSE_OK,
def add_share(self, button): namelbl = gtk.Label("Share name:")
subprocess.check_call(["xdg-open", url])
try: subprocess.check_call(["xdg-open", url]) except subprocess.CalledProcessError as exc: print exc else: return
def browse_peer_by_url(self, url): """Open the given peer URL with the most natural file manager for the current platform that we can find""" import os, subprocess if os.name == "nt": # this is how it's done programmatically? except that it won't invoke # the default file manager (explorer) on winders #ShellExecute(None, "explore", url, None, None, SW_SHOWNORMAL)
subprocess.check_call(["nautilus", url])
try: subprocess.check_call(["nautilus", url]) except subprocess.CalledProcessError as exc: print exc else: return
def browse_peer_by_url(self, url): """Open the given peer URL with the most natural file manager for the current platform that we can find""" import os, subprocess if os.name == "nt": # this is how it's done programmatically? except that it won't invoke # the default file manager (explorer) on winders #ShellExecute(None, "explore", url, None, None, SW_SHOWNORMAL)
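browse_peer_by_url above tries xdg-open and then nautilus, printing the error and falling through on failure. A hedged cross-platform sketch of that fallback chain; the helper name is mine, and the final webbrowser fallback is an addition rather than something the original code does:

import os
import subprocess
import webbrowser

def open_in_file_manager(url):
    # Try the platform opener first, then the command-line fallbacks used
    # above, and finally hand the URL to a web browser.
    if os.name == "nt":
        os.startfile(url)  # delegates to the default Windows handler
        return
    for cmd in (["xdg-open", url], ["nautilus", url]):
        try:
            subprocess.check_call(cmd)
            return
        except (OSError, subprocess.CalledProcessError):
            continue
    webbrowser.open(url)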
cluster_read_size = min(read_size, self.cluster_size - read_offset)
cluster_read_size = min(read_size, self.cluster_size - read_offset, chain_size)
def read_from_chain(self, first_cluster, chain_size, read_offset, read_size): if chain_size <= 0: return "" #assert read_offset + read_size <= chain_size, (read_offset, read_size, chain_size) if read_offset > self.cluster_size: return self.read_from_chain( self.next_cluster(first_cluster), chain_size - self.cluster_size, read_offset - self.cluster_size, read_size) cluster_read_size = min(read_size, self.cluster_size - read_offset) buffer = self.read_cluster(first_cluster, read_offset, cluster_read_size) assert len(buffer) == cluster_read_size return buffer + self.read_from_chain( self.next_cluster(first_cluster), chain_size - self.cluster_size, 0, read_size - cluster_read_size)
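read_from_chain above recurses cluster by cluster, clamping each read to the cluster boundary. An iterative sketch of the same span read over an in-memory list of fixed-size clusters, leaving out the chain-size bookkeeping:

def read_span(clusters, cluster_size, offset, size):
    # Collect `size` bytes starting at `offset` within a chain of
    # fixed-size clusters, clamping each read to the current cluster
    # the way read_from_chain does above.
    out = []
    index, pos = divmod(offset, cluster_size)
    while size > 0 and index < len(clusters):
        chunk = clusters[index][pos:pos + size]
        out.append(chunk)
        size -= len(chunk)
        index += 1
        pos = 0
    return b"".join(out)

# read_span([b"abcd", b"efgh"], 4, 2, 4) == b"cdef"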
elif a.name in nopub_actions:
if a.name in nopub_actions:
def trans_include(repo_uri, fargs, transaction=None): basedirs = [] timestamp_files = [] error_occurred = False opts, pargs = getopt.getopt(fargs, "d:T:") for opt, arg in opts: if opt == "-d": basedirs.append(arg) elif opt == "-T": timestamp_files.append(arg) if transaction == None: try: trans_id = os.environ["PKG_TRANS_ID"] except KeyError: usage(_("No transaction ID specified in $PKG_TRANS_ID"), cmd="include") xport, pub = setup_transport_and_pubs(repo_uri) t = trans.Transaction(repo_uri, trans_id=trans_id, xport=xport, pub=pub) else: t = transaction if not pargs: filelist = [("<stdin>", sys.stdin)] else: try: filelist = [(f, file(f)) for f in pargs] except IOError, e: error(e, cmd="include") return 1 lines = [] # giant string of all input files concatenated together linecnts = [] # tuples of starting line number, ending line number linecounter = 0 # running total for filename, f in filelist: try: data = f.read() except IOError, e: error(e, cmd="include") return 1 lines.append(data) linecnt = len(data.splitlines()) linecnts.append((linecounter, linecounter + linecnt)) linecounter += linecnt m = pkg.manifest.Manifest() try: m.set_content("\n".join(lines)) except apx.InvalidPackageErrors, err: e = err.errors[0] lineno = e.lineno for i, tup in enumerate(linecnts): if lineno > tup[0] and lineno <= tup[1]: filename = filelist[i][0] lineno -= tup[0] break else: filename = "???" lineno = "???" error(_("File %s line %s: %s") % (filename, lineno, e), cmd="include") return 1 invalid_action = False for a in m.gen_actions(): # don't publish this action if a.name == "set" and a.attrs["name"] in ["pkg.fmri", "fmri"]: continue elif a.name == "file": path, bd = pkg.actions.set_action_data( a.hash, a, basedirs) basename = os.path.basename(a.attrs["path"]) for pattern in timestamp_files: if fnmatch.fnmatch(basename, pattern): ts = pkg.misc.time_to_timestamp( os.stat(path).st_mtime) a.attrs["timestamp"] = ts break elif a.name == "license": pkg.actions.set_action_data(a.hash, a, basedirs) elif a.name in nopub_actions: error(_("invalid action for publication: %s") % str(a), cmd="include") invalid_action = True else: t.add(a) if invalid_action: return 3 else: return 0
self.pkgsend_bulk(durl, self.example_pkg10)
self.pkgsend_bulk(durl, self.dup_lines_pkg10)
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
self.pkg("search 'example_pkg:set:pkg.fmri:'")
self.pkg("search 'dup_lines:set:pkg.fmri:'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
self.pkg("search -o pkg.shortfmri '*6*'")
self.pkg("search -o pkg.shortfmri 'a'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
self.pkg("install example_pkg") self.pkg("search -l 'example_pkg:set:pkg.fmri:'")
self.pkg("install dup_lines") self.pkg("search -l 'dup_lines:set:pkg.fmri:'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
self.pkg("search -l -o pkg.shortfmri,action.key '*6*'") expected_number_of_lines = 9 if "pkg.fmri" in self.output: expected_number_of_lines += 1 self.debug("Expected number of lines:%s" % expected_number_of_lines) self.assertEqual(len(self.output.splitlines()), expected_number_of_lines)
self.pkg("search -l -o pkg.shortfmri,action.key 'a'") self.assertEqual(len(self.output.splitlines()), 4)
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
("show_on_expected_fail", 'f',
("showonexpectedfail", 'f',
def run(self): # nuke everything print("deleting " + dist_dir) shutil.rmtree(dist_dir, True) print("deleting " + build_dir) shutil.rmtree(build_dir, True) print("deleting " + root_dir) shutil.rmtree(root_dir, True) print("deleting " + pkgs_dir) shutil.rmtree(pkgs_dir, True) print("deleting " + extern_dir) shutil.rmtree(extern_dir, True)
self.show_on_expected_fail = 0
self.showonexpectedfail = 0
def initialize_options(self): self.only = "" self.baselinefile = "" self.verbosemode = 0 self.parseable = 0 self.genbaseline = 0 self.timing = 0 self.coverage = 0 self.stoponerr = 0 self.debugoutput = 0 self.show_on_expected_fail = 0 self.startattest = "" self.archivedir = ""
remote_publishers=True)
remote_prefix=True)
def main_func(): global cache_dir, download_start, xport, xport_cfg all_timestamps = False all_versions = False keep_compressed = False list_newest = False recursive = False src_uri = None target = None incoming_dir = None src_pub = None targ_pub = None temp_root = misc.config_temp_root() gettext.install("pkg", "/usr/share/locale") global_settings.client_name = "pkgrecv" target = os.environ.get("PKG_DEST", None) src_uri = os.environ.get("PKG_SRC", None) try: opts, pargs = getopt.getopt(sys.argv[1:], "c:d:hkm:nrs:") except getopt.GetoptError, e: usage(_("Illegal option -- %s") % e.opt) for opt, arg in opts: if opt == "-c": cache_dir = arg elif opt == "-d": target = arg elif opt == "-h": usage(retcode=0) elif opt == "-k": keep_compressed = True elif opt == "-n": list_newest = True elif opt == "-r": recursive = True elif opt == "-s": src_uri = arg elif opt == "-m": if arg == "all-timestamps": all_timestamps = True elif arg == "all-versions": all_versions = True else: usage(_("Illegal option value -- %s") % arg) if not src_uri: usage(_("a source repository must be provided")) if not cache_dir: cache_dir = tempfile.mkdtemp(dir=temp_root) # Only clean-up cache dir if implicitly created by pkgrecv. # User's cache-dirs should be preserved tmpdirs.append(cache_dir) incoming_dir = tempfile.mkdtemp(dir=temp_root) tmpdirs.append(incoming_dir) # Create transport and transport config xport, xport_cfg = transport.setup_transport() xport_cfg.cached_download_dir = cache_dir xport_cfg.incoming_download_dir = incoming_dir # Since publication destionations may only have one repository # configured per publisher, create destination as separate transport # in case source and destination have identical publisher configuration # but different repository endpoints. dest_xport, dest_xport_cfg = transport.setup_transport() dest_xport_cfg.cached_download_dir = cache_dir dest_xport_cfg.incoming_download_dir = incoming_dir # Configure src publisher src_pub = transport.setup_publisher(src_uri, "source", xport, xport_cfg, remote_publishers=True) tracker = get_tracker() if list_newest: if pargs or len(pargs) > 0: usage(_("-n takes no options")) fmri_list = fetch_catalog(src_pub, tracker, xport) list_newest_fmris(fmri_list) return 0 if pargs == None or len(pargs) == 0: usage(_("must specify at least one pkgfmri")) republish = False if not target: target = basedir = os.getcwd() elif target.find("://") != -1: basedir = tempfile.mkdtemp(dir=temp_root) tmpdirs.append(basedir) republish = True targ_pub = transport.setup_publisher(target, "target", dest_xport, dest_xport_cfg, remote_publishers=True) # Files have to be decompressed for republishing. keep_compressed = False if target.startswith("file://"): # Check to see if the repository exists first. try: t = trans.Transaction(target, xport=dest_xport, pub=targ_pub) except trans.TransactionRepositoryInvalidError, e: txt = str(e) + "\n\n" txt += _("To create a repository, use the " "pkgsend command.") abort(err=txt) except trans.TransactionRepositoryConfigError, e: txt = str(e) + "\n\n" txt += _("The repository configuration for " "the repository located at '%s' is not " "valid or the specified path does not " "exist. 
Please correct the configuration " "of the repository or create a new " "one.") % target abort(err=txt) except trans.TransactionError, e: abort(err=e) else: basedir = target if not os.path.exists(basedir): try: os.makedirs(basedir, misc.PKG_DIR_MODE) except: error(_("Unable to create basedir '%s'.") % \ basedir) return 1 xport_cfg.pkgdir = basedir if republish: targ_fmris = fetch_catalog(targ_pub, tracker, dest_xport) all_fmris = fetch_catalog(src_pub, tracker, xport) fmri_arguments = pargs fmri_list = prune(list(set(expand_matching_fmris(all_fmris, fmri_arguments))), all_versions, all_timestamps) if recursive: msg(_("Retrieving manifests for dependency evaluation ...")) tracker.evaluate_start() fmri_list = prune(get_dependencies(src_uri, fmri_list, basedir, tracker), all_versions, all_timestamps) tracker.evaluate_done() def get_basename(pfmri): open_time = pfmri.get_timestamp() return "%d_%s" % \ (calendar.timegm(open_time.utctimetuple()), urllib.quote(str(pfmri), "")) # First, retrieve the manifests and calculate package transfer sizes. npkgs = len(fmri_list) nfiles = 0 nbytes = 0 if not recursive: msg(_("Retrieving manifests for package evaluation ...")) tracker.evaluate_start(npkgs=npkgs) retrieve_list = [] while fmri_list: f = fmri_list.pop() if republish and f in targ_fmris: msg(_("Skipping %s: already present " "at destination") % f) continue m = get_manifest(f, basedir) pkgdir = os.path.join(basedir, f.get_dir_path()) mfile = xport.multi_file_ni(src_pub, pkgdir, not keep_compressed, tracker) nf, nb = add_hashes_to_multi(m, mfile) nfiles += nf nbytes += nb retrieve_list.append((f, mfile)) tracker.evaluate_progress(fmri=f) tracker.evaluate_done() # Next, retrieve and store the content for each package. msg(_("Retrieving package content ...")) tracker.download_set_goal(len(retrieve_list), nfiles, nbytes) publish_list = [] while retrieve_list: f, mfile = retrieve_list.pop() tracker.download_start_pkg(f.get_fmri(include_scheme=False)) if mfile: mfile.wait_files() if not download_start: download_start = True if republish: publish_list.append(f) tracker.download_end_pkg() tracker.download_done() tracker.reset() # Finally, republish the packages if needed. while publish_list: f = publish_list.pop() msg(_("Republishing %s ...") % f) m = get_manifest(f, basedir) # Get first line of original manifest so that inclusion of the # scheme can be determined. use_scheme = True contents = get_manifest(f, basedir, contents=True) if contents.splitlines()[0].find("pkg:/") == -1: use_scheme = False pkg_name = f.get_fmri(include_scheme=use_scheme) pkgdir = os.path.join(basedir, f.get_dir_path()) # This is needed so any previous failures for a package # can be aborted. trans_id = get_basename(f) if not targ_pub: targ_pub = transport.setup_publisher(target, "target", dest_xport, dest_xport_cfg, remote_publishers=True) try: t = trans.Transaction(target, pkg_name=pkg_name, trans_id=trans_id, xport=dest_xport, pub=targ_pub) # Remove any previous failed attempt to # to republish this package. try: t.close(abandon=True) except: # It might not exist already. pass t.open() for a in m.gen_actions(): if a.name == "set" and \ a.attrs.get("name", "") in ("fmri", "pkg.fmri"): # To be consistent with the server, # the fmri can't be added to the # manifest. continue if hasattr(a, "hash"): fname = os.path.join(pkgdir, a.hash) a.data = lambda: open(fname, "rb") t.add(a) t.close() except trans.TransactionError, e: abort(err=e) return 1 # Dump all temporary data. cleanup() return 0
dest_xport, dest_xport_cfg, remote_publishers=True)
dest_xport, dest_xport_cfg, remote_prefix=True)
def main_func(): global cache_dir, download_start, xport, xport_cfg all_timestamps = False all_versions = False keep_compressed = False list_newest = False recursive = False src_uri = None target = None incoming_dir = None src_pub = None targ_pub = None temp_root = misc.config_temp_root() gettext.install("pkg", "/usr/share/locale") global_settings.client_name = "pkgrecv" target = os.environ.get("PKG_DEST", None) src_uri = os.environ.get("PKG_SRC", None) try: opts, pargs = getopt.getopt(sys.argv[1:], "c:d:hkm:nrs:") except getopt.GetoptError, e: usage(_("Illegal option -- %s") % e.opt) for opt, arg in opts: if opt == "-c": cache_dir = arg elif opt == "-d": target = arg elif opt == "-h": usage(retcode=0) elif opt == "-k": keep_compressed = True elif opt == "-n": list_newest = True elif opt == "-r": recursive = True elif opt == "-s": src_uri = arg elif opt == "-m": if arg == "all-timestamps": all_timestamps = True elif arg == "all-versions": all_versions = True else: usage(_("Illegal option value -- %s") % arg) if not src_uri: usage(_("a source repository must be provided")) if not cache_dir: cache_dir = tempfile.mkdtemp(dir=temp_root) # Only clean-up cache dir if implicitly created by pkgrecv. # User's cache-dirs should be preserved tmpdirs.append(cache_dir) incoming_dir = tempfile.mkdtemp(dir=temp_root) tmpdirs.append(incoming_dir) # Create transport and transport config xport, xport_cfg = transport.setup_transport() xport_cfg.cached_download_dir = cache_dir xport_cfg.incoming_download_dir = incoming_dir # Since publication destionations may only have one repository # configured per publisher, create destination as separate transport # in case source and destination have identical publisher configuration # but different repository endpoints. dest_xport, dest_xport_cfg = transport.setup_transport() dest_xport_cfg.cached_download_dir = cache_dir dest_xport_cfg.incoming_download_dir = incoming_dir # Configure src publisher src_pub = transport.setup_publisher(src_uri, "source", xport, xport_cfg, remote_publishers=True) tracker = get_tracker() if list_newest: if pargs or len(pargs) > 0: usage(_("-n takes no options")) fmri_list = fetch_catalog(src_pub, tracker, xport) list_newest_fmris(fmri_list) return 0 if pargs == None or len(pargs) == 0: usage(_("must specify at least one pkgfmri")) republish = False if not target: target = basedir = os.getcwd() elif target.find("://") != -1: basedir = tempfile.mkdtemp(dir=temp_root) tmpdirs.append(basedir) republish = True targ_pub = transport.setup_publisher(target, "target", dest_xport, dest_xport_cfg, remote_publishers=True) # Files have to be decompressed for republishing. keep_compressed = False if target.startswith("file://"): # Check to see if the repository exists first. try: t = trans.Transaction(target, xport=dest_xport, pub=targ_pub) except trans.TransactionRepositoryInvalidError, e: txt = str(e) + "\n\n" txt += _("To create a repository, use the " "pkgsend command.") abort(err=txt) except trans.TransactionRepositoryConfigError, e: txt = str(e) + "\n\n" txt += _("The repository configuration for " "the repository located at '%s' is not " "valid or the specified path does not " "exist. 
Please correct the configuration " "of the repository or create a new " "one.") % target abort(err=txt) except trans.TransactionError, e: abort(err=e) else: basedir = target if not os.path.exists(basedir): try: os.makedirs(basedir, misc.PKG_DIR_MODE) except: error(_("Unable to create basedir '%s'.") % \ basedir) return 1 xport_cfg.pkgdir = basedir if republish: targ_fmris = fetch_catalog(targ_pub, tracker, dest_xport) all_fmris = fetch_catalog(src_pub, tracker, xport) fmri_arguments = pargs fmri_list = prune(list(set(expand_matching_fmris(all_fmris, fmri_arguments))), all_versions, all_timestamps) if recursive: msg(_("Retrieving manifests for dependency evaluation ...")) tracker.evaluate_start() fmri_list = prune(get_dependencies(src_uri, fmri_list, basedir, tracker), all_versions, all_timestamps) tracker.evaluate_done() def get_basename(pfmri): open_time = pfmri.get_timestamp() return "%d_%s" % \ (calendar.timegm(open_time.utctimetuple()), urllib.quote(str(pfmri), "")) # First, retrieve the manifests and calculate package transfer sizes. npkgs = len(fmri_list) nfiles = 0 nbytes = 0 if not recursive: msg(_("Retrieving manifests for package evaluation ...")) tracker.evaluate_start(npkgs=npkgs) retrieve_list = [] while fmri_list: f = fmri_list.pop() if republish and f in targ_fmris: msg(_("Skipping %s: already present " "at destination") % f) continue m = get_manifest(f, basedir) pkgdir = os.path.join(basedir, f.get_dir_path()) mfile = xport.multi_file_ni(src_pub, pkgdir, not keep_compressed, tracker) nf, nb = add_hashes_to_multi(m, mfile) nfiles += nf nbytes += nb retrieve_list.append((f, mfile)) tracker.evaluate_progress(fmri=f) tracker.evaluate_done() # Next, retrieve and store the content for each package. msg(_("Retrieving package content ...")) tracker.download_set_goal(len(retrieve_list), nfiles, nbytes) publish_list = [] while retrieve_list: f, mfile = retrieve_list.pop() tracker.download_start_pkg(f.get_fmri(include_scheme=False)) if mfile: mfile.wait_files() if not download_start: download_start = True if republish: publish_list.append(f) tracker.download_end_pkg() tracker.download_done() tracker.reset() # Finally, republish the packages if needed. while publish_list: f = publish_list.pop() msg(_("Republishing %s ...") % f) m = get_manifest(f, basedir) # Get first line of original manifest so that inclusion of the # scheme can be determined. use_scheme = True contents = get_manifest(f, basedir, contents=True) if contents.splitlines()[0].find("pkg:/") == -1: use_scheme = False pkg_name = f.get_fmri(include_scheme=use_scheme) pkgdir = os.path.join(basedir, f.get_dir_path()) # This is needed so any previous failures for a package # can be aborted. trans_id = get_basename(f) if not targ_pub: targ_pub = transport.setup_publisher(target, "target", dest_xport, dest_xport_cfg, remote_publishers=True) try: t = trans.Transaction(target, pkg_name=pkg_name, trans_id=trans_id, xport=dest_xport, pub=targ_pub) # Remove any previous failed attempt to # to republish this package. try: t.close(abandon=True) except: # It might not exist already. pass t.open() for a in m.gen_actions(): if a.name == "set" and \ a.attrs.get("name", "") in ("fmri", "pkg.fmri"): # To be consistent with the server, # the fmri can't be added to the # manifest. continue if hasattr(a, "hash"): fname = os.path.join(pkgdir, a.hash) a.data = lambda: open(fname, "rb") t.add(a) t.close() except trans.TransactionError, e: abort(err=e) return 1 # Dump all temporary data. cleanup() return 0
dest_xport, dest_xport_cfg, remote_publishers=True)
dest_xport, dest_xport_cfg, remote_prefix=True)
def get_basename(pfmri): open_time = pfmri.get_timestamp() return "%d_%s" % \ (calendar.timegm(open_time.utctimetuple()), urllib.quote(str(pfmri), ""))
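get_basename above derives a transaction id from the package's open time and its URL-quoted FMRI. A small sketch of the same formula; the import fallback covers both quote locations since the surrounding code is Python 2, and the sample FMRI string is made up:

import calendar
import datetime

try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote        # Python 2

def basename_for(fmri_text, open_time):
    # POSIX seconds of the open time, an underscore, then the fully quoted
    # FMRI (the empty safe-set also quotes "/"), as in get_basename above.
    return "%d_%s" % (calendar.timegm(open_time.utctimetuple()),
                      quote(fmri_text, ""))

# basename_for("pkg://example/foo@1.0", datetime.datetime(2010, 1, 1))
# -> "1262304000_pkg%3A%2F%2Fexample%2Ffoo%401.0"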
publisher_info = self._get_getpublisherinfo(pub,
publisher_info = self._get_publisherinfo(pub,
def get_publisherinfo(self, pub, ccancel=None): """Given a publisher pub, return the publisher/0 information in a StringIO object."""
if not_these_pkgs: newpkgs = set(pkgdict[name] for name in pkgdict.keys() if name not in not_these_pkgs ) else: newpkgs = set(pkgdict.values())
def main_func(): global file_repo global def_branch global def_repo global def_vers global extra_entire_contents global just_these_pkgs global not_these_pkgs global nopublish global publish_all global print_pkg_names global reference_uris global show_debug global wos_path global not_these_consolidations global curpkg try: _opts, _args = getopt.getopt(sys.argv[1:], "AB:C:D:E:I:J:G:NR:T:b:dj:m:ns:v:w:p:") except getopt.GetoptError, _e: print "unknown option", _e.opt sys.exit(1) g_proto_area = os.environ.get("ROOT", "") for opt, arg in _opts: if opt == "-b": def_branch = arg.rstrip("abcdefghijklmnopqrstuvwxyz") elif opt == "-d": show_debug = True elif opt == "-j": # means we're using the new argument form... just_these_pkgs.append(arg) elif opt == "-m": _a = arg.split("=", 1) set_macro(_a[0], _a[1]) elif opt == "-n": nopublish = True elif opt == "-p": if not os.path.exists(arg): raise RuntimeError("Invalid prototype area specified.") # Clean up relative ../../, etc. out of path to proto g_proto_area = os.path.realpath(arg) elif opt == "-s": def_repo = arg if def_repo.startswith("file://"): file_repo = True elif opt == "-v": def_vers = arg elif opt == "-w": wos_path.append(arg) elif opt == "-A": # Always publish obsoleted and renamed packages. publish_all = True elif opt == "-B": branch_file = file(arg) for _line in branch_file: if not _line.startswith("#"): bfargs = _line.split() if len(bfargs) == 2: branch_dict[bfargs[0]] = bfargs[1] branch_file.close() elif opt == "-C": not_these_consolidations.append(arg) elif opt == "-D": elided_files[arg] = True elif opt == "-E": if "@" not in arg: print "-E fmris require a version: %s" % arg sys.exit(2) extra_entire_contents.append(arg) elif opt == "-I": include_path.extend(arg.split(":")) elif opt == "-J": not_these_pkgs.append(arg) elif opt == "-G": #another file of global includes global_includes.append(arg) elif opt == "-N": print_pkg_names = True elif opt == "-R": reference_uris.append(arg) elif opt == "-T": timestamp_files.append(arg) if not def_branch: print "need a branch id (build number)" sys.exit(2) elif "." not in def_branch: print "branch id needs to be of the form 'x.y'" sys.exit(2) if not _args: print "need argument!" sys.exit(2) if not wos_path: wos_path = def_wos_path if just_these_pkgs: filelist = _args else: filelist = _args[0:1] just_these_pkgs = _args[1:] if print_pkg_names: for _mf in filelist: SolarisParse(_mf) sys.exit(0) start_time = time.clock() print "First pass: initial import", datetime.now() for _mf in filelist: SolarisParse(_mf) # Remove pkgs we're not touching because we're skipping that # consolidation pkgs_to_elide = [ p.name for p in pkgdict.values() if p.consolidation in not_these_consolidations ] for pkg in pkgs_to_elide: del pkgdict[pkg] for pkg in not_these_pkgs: del pkgdict[pkg] # Unless we are publishing all obsolete and renamed packages # (-A command line option), remove obsolete and renamed packages # that weren't obsoleted or renamed at this branch and create # a dictionary (called or_pkgs_per_con) of obsoleted and renamed # packages per consolidation. The version portion of the fmri # will contain the branch that the package was obsoleted or renamed at. 
or_pkgs_per_con = {} obs_or_renamed_pkgs = {} for pkg in pkgdict.keys(): obs_branch = pkgdict[pkg].obsolete_branch rename_branch = pkgdict[pkg].rename_branch ver_tokens = pkgdict[pkg].version.split(".") cons = pkgdict[pkg].consolidation if obs_branch: ver_tokens[-1] = obs_branch ver_string = ".".join(ver_tokens) or_pkgs_per_con.setdefault(cons, {})[pkg] = ver_string obs_or_renamed_pkgs[pkg] = (pkgdict[pkg].fmristr(), "obsolete") if publish_all: pkgdict[pkg].version = ver_string else: if obs_branch != def_branch.split(".")[1]: # Not publishing this obsolete package. del pkgdict[pkg] if rename_branch: ver_tokens[-1] = rename_branch ver_string = ".".join(ver_tokens) or_pkgs_per_con.setdefault(cons, {})[pkg] = ver_string obs_or_renamed_pkgs[pkg] = (pkgdict[pkg].fmristr(), "renamed") if publish_all: pkgdict[pkg].version = ver_string else: if rename_branch != def_branch.split(".")[1]: # Not publishing this renamed package. del pkgdict[pkg] print "Second pass: global crosschecks", datetime.now() # perform global crosschecks # path_dict.clear() for pkg in pkgdict.values(): for action in pkg.actions: if "path" not in action.attrs: continue path = action.attrs["path"] path_dict.setdefault(path, []).append(action) if action.name in ["file", "link", "hardlink"]: basename_dict.setdefault(os.path.basename(path), []).append(action) pkgpath_dict.setdefault(path, []).append(action.attrs["importer.ipspkg"]) errors = check_pathdict_actions(path_dict) if errors: for e in errors: print "Fail: %s" % e sys.exit(1) # check for require dependencies on obsolete or renamed pkgs errors = [] warns = [] for pack in pkgdict.values(): for action in pack.actions: if action.name != "depend": continue if action.attrs["type"] == "require" and "fmri" in action.attrs: fmri = action.attrs["fmri"].split("@")[0] # remove version if fmri.startswith("pkg:/"): # remove pkg:/ if exists fmri = fmri[5:] if fmri in obs_or_renamed_pkgs: tup = obs_or_renamed_pkgs[fmri] s = "Pkg %s has 'require' dependency on pkg %s, which is %s" % ( (pack.fmristr(),) + tup) if tup[1] == "obsolete": errors.append(s) else: warns.append(s) if warns: for w in warns: print "Warn: %s" % w if errors: for e in errors: print "Fail: %s" % e sys.exit(1) print "packages being published are self consistent" if reference_uris: print "downloading and checking external references" excludes = [variant.Variants({"variant.arch": get_arch()}).allow_action] for uri in reference_uris: server, fmri_string = uri.split("@", 1) for pfmri in get_dependencies(server, [fmri_string]): if pfmri.get_name() in pkgdict: continue # ignore pkgs already seen pfmri_str = "%s@%s" % (pfmri.get_name(), pfmri.get_version()) fmridict[pfmri.get_name()] = pfmri_str for action in get_manifest(server, pfmri).gen_actions(excludes): if "path" not in action.attrs: continue if action.name == "unknown": # we don't care about unknown actions - # mispublished packages with eg. 
SVR4 # pkginfo files result in duplicate paths, # causing errors in check_pathdict_actions # "Multiple actions on different types # with the same path" print "INFO: ignoring action in %s: %s" \ % (pfmri_str, str(action)) continue action.attrs["importer.ipspkg"] = pfmri_str path_dict.setdefault(action.attrs["path"], []).append(action) if action.name in ["file", "link", "hardlink"]: basename_dict.setdefault(os.path.basename( action.attrs["path"]), []).append(action) pkgpath_dict.setdefault(action.attrs["path"], []).append(action.attrs["importer.ipspkg"]) errors = check_pathdict_actions(path_dict) if errors: for e in errors: print "Fail: %s" % e sys.exit(1) print "external packages checked for conflicts" print "Third pass: dependency id, resolution and publication", datetime.now() consolidation_incorporations = [] obsoleted_renamed_pkgs = [] # Generate consolidation incorporations for cons in cons_dict.keys(): if cons in not_these_consolidations: print "skipping consolidation %s" % cons continue consolidation_incorporation = "consolidation/%s/%s-incorporation" % ( cons, cons) consolidation_incorporations.append(consolidation_incorporation) curpkg = start_package(consolidation_incorporation) curpkg.summary = "%s consolidation incorporation" % cons curpkg.desc = "This incorporation constrains packages " \ "from the %s consolidation." % cons # Add packages that aren't renamed or obsoleted or_pkgs = or_pkgs_per_con.get(cons, {}) curpkg.actions.append(actions.fromstr( "set name=pkg.depend.install-hold value=core-os.%s" % cons)) for depend in cons_dict[cons]: if depend not in or_pkgs: action = actions.fromstr( "depend fmri=%s type=incorporate" % depend) action.attrs["importer.source"] = "depend" curpkg.actions.append(action) # Add in the obsoleted and renamed packages for this # consolidation. for name, version in or_pkgs.iteritems(): action = actions.fromstr( "depend fmri=%s@%s type=incorporate" % (name, version)) action.attrs["importer.source"] = "depend" curpkg.actions.append(action) obsoleted_renamed_pkgs.append("%s@%s" % (name, version)) action = actions.fromstr("set " \ "name=org.opensolaris.consolidation value=%s" % cons) action.attrs["importer.source"] = "add" curpkg.actions.append(action) end_package(curpkg) curpkg = None # Generate entire consolidation if we're generating any consolidation incorps if consolidation_incorporations: curpkg = start_package("entire") curpkg.summary = "incorporation to lock all system packages to same build" curpkg.desc = "This package constrains " \ "system package versions to the same build. WARNING: Proper " \ "system update and correct package selection depend on the " \ "presence of this incorporation. Removing this package will " \ "result in an unsupported system." 
curpkg.actions.append(actions.fromstr( "set name=pkg.depend.install-hold value=core-os")) for incorp in consolidation_incorporations: action = actions.fromstr("depend fmri=%s type=incorporate" % incorp) action.attrs["importer.source"] = "auto-generated" curpkg.actions.append(action) action = actions.fromstr("depend fmri=%s type=require" % incorp) action.attrs["importer.source"] = "auto-generated" action.attrs["importer.no-version"] = "true" curpkg.actions.append(action) for extra in extra_entire_contents: action = actions.fromstr("depend fmri=%s type=incorporate" % extra) action.attrs["importer.source"] = "command-line" curpkg.actions.append(action) extra_noversion = extra.split("@")[0] # remove version action = actions.fromstr("depend fmri=%s type=require" % extra_noversion) action.attrs["importer.source"] = "command-line" action.attrs["importer.no-version"] = "true" curpkg.actions.append(action) end_package(curpkg) curpkg = None incorporated_pkgs = set([ f for l in cons_dict.values() for f in l ]) incorporated_pkgs |= set(consolidation_incorporations) incorporated_pkgs |= set(["entire", "redistributable"]) incorporated_pkgs |= set(obsoleted_renamed_pkgs) unincorps = set(pkgdict.keys()) - incorporated_pkgs if unincorps: # look through these; if they have only set actions they're # ancient obsoleted pkgs - ignore them. for f in unincorps.copy(): for a in pkgdict[f].actions: if a.name != "set": break else: unincorps.remove(f) print "The following non-empty unincorporated pkgs are not part of any consolidation" for f in unincorps: print f if just_these_pkgs: newpkgs = set(pkgdict[name] for name in pkgdict.keys() if name in just_these_pkgs ) else: newpkgs = set(pkgdict.values()) if not_these_pkgs: newpkgs = set(pkgdict[name] for name in pkgdict.keys() if name not in not_these_pkgs ) else: newpkgs = set(pkgdict.values()) if not_these_consolidations: newpkgs = set([ p for p in newpkgs if not p.delivered_via_ips() ]) processed = 0 total = len(newpkgs) error_count = 0 for _p in sorted(newpkgs): if show_debug: print " Version:", _p.version print " Description:", _p.desc print " Summary:", _p.summary print " Classification:", ",".join(_p.classification) try: publish_pkg(_p) except trans.TransactionError, _e: print "%s: FAILED: %s\n" % (_p.name, _e) error_count += 1 processed += 1 if show_debug: print "%d/%d packages processed; %.2f%% complete" % (processed, total, processed * 100.0 / total) if error_count: print "%d/%d packages has errors; %.2f%% FAILED" % (error_count, total, error_count * 100.0 / total) sys.exit(1) print "%d/%d packages processed; %.2f%% complete" % (processed, total, processed * 100.0 / total) if file_repo: code = repo_add_content(def_repo[7:], g_proto_area) if code: sys.exit(code) print "Done:", datetime.now() elapsed = time.clock() - start_time print "publication took %d:%.2d" % (elapsed/60, elapsed % 60) sys.exit(0)
pub_name = "opensolaris.org"
def get_smf_packages(server_url, manifest_locations, filter): """ Performs a search against server_url looking for packages which contain SMF manifests, returning a list of those pfmris """ dir = os.getcwd() tracker = pkg.client.progress.QuietProgressTracker() image_dir = tempfile.mkdtemp("", "pkg_importer_smfsearch.") is_zone = False pub_name = "opensolaris.org" refresh_allowed = True # create a temporary image api_inst = pkg.client.api.image_create(PKG_CLIENT_NAME, CLIENT_API_VERSION, image_dir, pkg.client.api.IMG_TYPE_USER, is_zone, facets=pkg.facet.Facets(), force=False, prefix=pub_name, progtrack=tracker, refresh_allowed=refresh_allowed, repo_uri=server_url) api_inst = pkg.client.api.ImageInterface(image_dir, pkg.client.api.CURRENT_API_VERSION, tracker, None, PKG_CLIENT_NAME) # restore the current directory, which ImageInterace had changed os.chdir(dir) searches = [] fmris = set() case_sensitive = False return_actions = True query = [] for manifest_loc in manifest_locations: query.append(pkg.client.api.Query(":directory:path:/%s" % manifest_loc, case_sensitive, return_actions)) searches.append(api_inst.remote_search(query)) shutil.rmtree(image_dir, True) for item in searches: for result in item: pfmri = None try: query_num, pub, (v, return_type, tmp) = result pfmri, index, action = tmp except ValueError: raise if pfmri is None: continue if filter in pfmri.get_fmri(): fmris.add(pfmri.get_fmri()) return [pkg.fmri.PkgFmri(pfmri) for pfmri in fmris]
is_zone, facets=pkg.facet.Facets(), force=False, prefix=pub_name,
is_zone, facets=pkg.facet.Facets(), force=False,
def get_smf_packages(server_url, manifest_locations, filter): """ Performs a search against server_url looking for packages which contain SMF manifests, returning a list of those pfmris """ dir = os.getcwd() tracker = pkg.client.progress.QuietProgressTracker() image_dir = tempfile.mkdtemp("", "pkg_importer_smfsearch.") is_zone = False pub_name = "opensolaris.org" refresh_allowed = True # create a temporary image api_inst = pkg.client.api.image_create(PKG_CLIENT_NAME, CLIENT_API_VERSION, image_dir, pkg.client.api.IMG_TYPE_USER, is_zone, facets=pkg.facet.Facets(), force=False, prefix=pub_name, progtrack=tracker, refresh_allowed=refresh_allowed, repo_uri=server_url) api_inst = pkg.client.api.ImageInterface(image_dir, pkg.client.api.CURRENT_API_VERSION, tracker, None, PKG_CLIENT_NAME) # restore the current directory, which ImageInterace had changed os.chdir(dir) searches = [] fmris = set() case_sensitive = False return_actions = True query = [] for manifest_loc in manifest_locations: query.append(pkg.client.api.Query(":directory:path:/%s" % manifest_loc, case_sensitive, return_actions)) searches.append(api_inst.remote_search(query)) shutil.rmtree(image_dir, True) for item in searches: for result in item: pfmri = None try: query_num, pub, (v, return_type, tmp) = result pfmri, index, action = tmp except ValueError: raise if pfmri is None: continue if filter in pfmri.get_fmri(): fmris.add(pfmri.get_fmri()) return [pkg.fmri.PkgFmri(pfmri) for pfmri in fmris]
self.write(cursor, user, id, {db_field: value}, context=context)
self.write(cursor, user, id, { db_field: datetime.datetime.combine(value, datetime.time()), }, context=context)
def set_function_fields(self, cursor, user, id, name, value, arg, context=None): request_obj = self.pool.get('res.request') req_ref_obj = self.pool.get('res.request.reference')
date = datetime(
date = datetime.datetime(
def add_minutes(self, cursor, user, company, date, minutes, context=None): minutes = int(round(minutes)) minutes = date.minute + minutes
date = datetime(
date = datetime.datetime(
def add_hours(self, cursor, user, company, date, hours, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
date += timedelta(days= -date.weekday() + intfloor(days))
date += datetime.timedelta(days= -date.weekday() + intfloor(days))
def add_days(self, cursor, user, company, date, days, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
date += timedelta(days= 7 * intfloor(weeks))
date += datetime.timedelta(days= 7 * intfloor(weeks))
def add_weeks(self, cursor, user, company, date, weeks, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
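The replacements in these rows switch bare datetime(...) and timedelta(...) calls to the datetime.datetime / datetime.timedelta forms, which is what a module-level `import datetime` requires. A two-line illustration with made-up dates:

import datetime

start = datetime.datetime(2010, 1, 4, 9, 0)
start += datetime.timedelta(days=7 * 2)   # jump two weeks, as add_weeks does
# With "from datetime import datetime, timedelta" the bare names in the
# removed lines would have resolved instead.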
cursor = Transation().cursor
cursor = Transaction().cursor
def get_function_fields(self, ids, names): ''' Function to compute function fields
self.assertRaises(Exception, test_view('project_plan'))
test_view('project_plan')
def test0005views(self): ''' Test views. ''' self.assertRaises(Exception, test_view('project_plan'))
db_field: datetime.datetime.combine(value, datetime.time()),
db_field: value \ and datetime.datetime.combine(value, datetime.time()) \ or False,
def set_function_fields(self, cursor, user, ids, name, value, context=None): request_obj = self.pool.get('res.request') req_ref_obj = self.pool.get('res.request.reference')
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.jobStart(), self.jobEnd(), self.getTime(), self.getCost(), self.getState()))
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
def log_job_closed(self): log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.jobStart(), self.jobEnd(), self.getTime(), self.getCost(), self.getState()))
self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
if self.__spool is not None: self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
def cleanUp(self): """cleans up the task, i.e. removes the task's spool directory""" self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
return self.__fsm.getState().getName()
try: return self.__fsm.getState().getName() except: return "Terminated"
def getState(self): return self.__fsm.getState().getName()
log.debug("Event '%s' not found." % event)
log.error("Event '%s' not found." % event)
def do_Event(self, event, reqCtxt): log.debug("JOB '%s' run in state '%s' event '%s'" % (self.ticket(), self.__fsm.getState().getName(), event)) if hasattr(self.__fsm, event): log.debug("Run event '%s'" % event) getattr(self.__fsm, event)(self, reqCtxt) else: log.debug("Event '%s' not found." % event) raise CommandFailed("jobFSM: No such Transition '%s'." % event)
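do_Event above resolves the event name to a method on the FSM with getattr and raises CommandFailed when no such transition exists. A self-contained sketch of that dispatch pattern; the toy Machine class and the error type are mine:

class Machine(object):
    def confirm(self, ctx):
        return "confirmed"

def dispatch(fsm, event, ctx):
    # Look the event up as a method on the FSM object; fail loudly when
    # the transition does not exist, as do_Event does above.
    handler = getattr(fsm, event, None)
    if handler is None:
        raise ValueError("no such transition: %r" % event)
    return handler(ctx)

# dispatch(Machine(), "confirm", None) -> "confirmed"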
log.debug("=========>do_Event '%s' run in state '%s' even [%s]" %
log.debug("=========>do_Event '%s' run in state '%s' event [%s]" %
def do_EventByMap(self, eventkey, reqCtxt): eventMap = { "Pending:Reserved" : 1, "Pending:Confirmed" : "confirm", "Running:Stage-In" : "runJob_StageIn", "Running:Instance-Starting" : "", "Running:Executing" : "runJob_Execute", "Running:Stage-Out" : "runJob_StageOut", "Running:Instance-Stopping" : "", "Finished" : "closeJob_Closing",
log.debug("FAILED: %s." % e) else: log.debug("Event '%s' not found." % event)
log.error("FAILED: %s." % e) else: log.error("Event '%s' not found." % event)
def do_EventByMap(self, eventkey, reqCtxt): eventMap = { "Pending:Reserved" : 1, "Pending:Confirmed" : "confirm", "Running:Stage-In" : "runJob_StageIn", "Running:Instance-Starting" : "", "Running:Executing" : "runJob_Execute", "Running:Stage-Out" : "runJob_StageOut", "Running:Instance-Stopping" : "", "Finished" : "closeJob_Closing",
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
log.debug("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState())) return True
def log_job_closed(self): log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
if (values[0] > bins[0]):
if (values[0] >= bins[0]):
def inline_as_py(values, bins=10, range=None): # define bins, size N if (range is not None): mn, mx = range if (mn > mx): raise AttributeError( 'max must be larger than min in range parameter.') if not np.iterable(bins): if range is None: range = (values.min(), values.max()) mn, mx = [mi+0.0 for mi in range] if mn == mx: mn -= 0.5 mx += 0.5 bins = np.linspace(mn, mx, bins+1, endpoint=True) else: bins = np.asarray(bins) if (np.diff(bins) < 0).any(): raise AttributeError( 'bins must increase monotonically.') # define n, empty array of size N+1 count = np.zeros(bins.size - 1, int) nvalues = values.size nbins = bins.size if values.size == 0: raise AttributeError( 'a must contain some data') if values[-1] < bins[0]: raise AttributeError( 'last element of a must be smaller than first element of bins') if (values[0] > bins[0]): rb = 0; else: lb = 0; rb = nvalues + 1; while(lb < rb - 1): if (values[(lb + rb) / 2.] < bins[0]): lb = (lb + rb) / 2. else: rb = (lb + rb) / 2. # Sweep through the values, counting, until they get too big lb = 0; valid = (rb < nvalues) if valid: valid = valid & (values[rb] < bins[nbins - 1]) while valid: # Advance the edge caret until the current value is in the current bin while (bins[lb+1] < values[rb]): lb += 1 # Increment the current bin count[lb] += 1 # Increment the value caret rb += 1 valid = (rb < nvalues) if valid: valid = valid & (values[rb] < bins[nbins - 1]) return count, bins
while (bins[lb+1] < values[rb]):
while (bins[lb+1] <= values[rb]):
def inline_as_py(values, bins=10, range=None): # define bins, size N if (range is not None): mn, mx = range if (mn > mx): raise AttributeError( 'max must be larger than min in range parameter.') if not np.iterable(bins): if range is None: range = (values.min(), values.max()) mn, mx = [mi+0.0 for mi in range] if mn == mx: mn -= 0.5 mx += 0.5 bins = np.linspace(mn, mx, bins+1, endpoint=True) else: bins = np.asarray(bins) if (np.diff(bins) < 0).any(): raise AttributeError( 'bins must increase monotonically.') # define n, empty array of size N+1 count = np.zeros(bins.size - 1, int) nvalues = values.size nbins = bins.size if values.size == 0: raise AttributeError( 'a must contain some data') if values[-1] < bins[0]: raise AttributeError( 'last element of a must be smaller than first element of bins') if (values[0] > bins[0]): rb = 0; else: lb = 0; rb = nvalues + 1; while(lb < rb - 1): if (values[(lb + rb) / 2.] < bins[0]): lb = (lb + rb) / 2. else: rb = (lb + rb) / 2. # Sweep through the values, counting, until they get too big lb = 0; valid = (rb < nvalues) if valid: valid = valid & (values[rb] < bins[nbins - 1]) while valid: # Advance the edge caret until the current value is in the current bin while (bins[lb+1] < values[rb]): lb += 1 # Increment the current bin count[lb] += 1 # Increment the value caret rb += 1 valid = (rb < nvalues) if valid: valid = valid & (values[rb] < bins[nbins - 1]) return count, bins
if bins[-1] == values[rb]: count[-1] += 1
def inline_as_py(values, bins=10, range=None): # define bins, size N if (range is not None): mn, mx = range if (mn > mx): raise AttributeError( 'max must be larger than min in range parameter.') if not np.iterable(bins): if range is None: range = (values.min(), values.max()) mn, mx = [mi+0.0 for mi in range] if mn == mx: mn -= 0.5 mx += 0.5 bins = np.linspace(mn, mx, bins+1, endpoint=True) else: bins = np.asarray(bins) if (np.diff(bins) < 0).any(): raise AttributeError( 'bins must increase monotonically.') # define n, empty array of size N+1 count = np.zeros(bins.size - 1, int) nvalues = values.size nbins = bins.size if values.size == 0: raise AttributeError( 'a must contain some data') if values[-1] < bins[0]: raise AttributeError( 'last element of a must be smaller than first element of bins') if (values[0] > bins[0]): rb = 0; else: lb = 0; rb = nvalues + 1; while(lb < rb - 1): if (values[(lb + rb) / 2.] < bins[0]): lb = (lb + rb) / 2. else: rb = (lb + rb) / 2. # Sweep through the values, counting, until they get too big lb = 0; valid = (rb < nvalues) if valid: valid = valid & (values[rb] < bins[nbins - 1]) while valid: # Advance the edge caret until the current value is in the current bin while (bins[lb+1] < values[rb]): lb += 1 # Increment the current bin count[lb] += 1 # Increment the value caret rb += 1 valid = (rb < nvalues) if valid: valid = valid & (values[rb] < bins[nbins - 1]) return count, bins
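The hand-rolled loop in inline_as_py reproduces NumPy's binning, including the inclusive top edge that the added `bins[-1] == values[rb]` check restores. For comparison, the library call itself, with `values` already sorted as the loop above assumes:

import numpy as np

values = np.array([0.1, 0.4, 0.4, 0.9, 1.0])   # sorted input
counts, edges = np.histogram(values, bins=2, range=(0.0, 1.0))
# counts -> array([3, 2]); edges -> array([0. , 0.5, 1. ])
# np.histogram treats the rightmost edge as inclusive, which is the case
# the "bins[-1] == values[rb]" adjustment handles in the manual loop.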
outputs = project_dirs[project]
outputs = project_dirs[project] + '/outputs'
def make_today_dir(project='tuning_change'): outputs = project_dirs[project] today = outputs + '/' + time.strftime('%y%m%d') if not os.path.exists(today): os.mkdir(today) return today
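The fix above inserts the '/outputs' component before the date directory. A minimal sketch of the same helper using os.path.join and os.makedirs; the project_dirs mapping here is hypothetical:

import os
import time

project_dirs = {'tuning_change': '/tmp/tuning_change'}  # hypothetical mapping

def make_today_dir(project='tuning_change'):
    # build <project dir>/outputs/<yymmdd> and create it if it does not exist
    today = os.path.join(project_dirs[project], 'outputs', time.strftime('%y%m%d'))
    if not os.path.exists(today):
        os.makedirs(today)  # also creates the intermediate 'outputs' directory
    return today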
def ExpectStanza(stanza, name): if stanza.tagName != name: raise UnexpectedXml(stanza) def ExpectIq(stanza, type, name): ExpectStanza(stanza, 'iq') if (stanza.getAttribute('type') != type or stanza.firstChild.tagName != name): raise UnexpectedXml(stanza) def GetStanzaId(stanza): return stanza.getAttribute('id') def HandleStream(stanza): ExpectStanza(stanza, 'stream:stream') domain = stanza.getAttribute('to') if domain: self._domain = domain SendStreamData() def SendStreamData(): next_id = self._id_generator.GetNextId() stream_data = self._STREAM_DATA % (self._domain, next_id) self._connection.SendData(stream_data) def GetUserDomain(stanza): encoded_username_password = stanza.firstChild.data username_password = base64.b64decode(encoded_username_password) (_, username_domain, _) = username_password.split('\0') at_pos = username_domain.find('@') if at_pos != -1: username = username_domain[:at_pos] domain = username_domain[at_pos+1:] else: username = username_domain domain = self._domain return (username, domain) if self._state == self._INITIAL_STREAM_NEEDED: HandleStream(stanza) self._connection.SendStanza(self._AUTH_STANZA, False) self._state = self._AUTH_NEEDED elif self._state == self._AUTH_NEEDED: ExpectStanza(stanza, 'auth') (self._username, self._domain) = GetUserDomain(stanza) self._connection.SendStanza(self._AUTH_SUCCESS_STANZA, False) self._state = self._AUTH_STREAM_NEEDED elif self._state == self._AUTH_STREAM_NEEDED: HandleStream(stanza) self._connection.SendStanza(self._BIND_STANZA, False) self._state = self._BIND_NEEDED elif self._state == self._BIND_NEEDED: ExpectIq(stanza, 'set', 'bind') stanza_id = GetStanzaId(stanza) resource_element = stanza.getElementsByTagName('resource')[0] resource = resource_element.firstChild.data full_resource = '%s.%s' % (self._resource_prefix, resource) response = CloneXml(self._BIND_RESULT_STANZA) response.setAttribute('id', stanza_id) self._jid = Jid(self._username, self._domain, full_resource) jid_text = response.parentNode.createTextNode(str(self._jid)) response.getElementsByTagName('jid')[0].appendChild(jid_text) self._connection.SendStanza(response) self._state = self._SESSION_NEEDED elif self._state == self._SESSION_NEEDED: ExpectIq(stanza, 'set', 'session') stanza_id = GetStanzaId(stanza) xml = CloneXml(self._IQ_RESPONSE_STANZA) xml.setAttribute('id', stanza_id) self._connection.SendStanza(xml) self._state = self._FINISHED self._connection.HandshakeDone(self._jid) def AddrString(addr): return '%s:%d' % addr class XmppConnection(asynchat.async_chat): """A single XMPP client connection. This class handles the connection to a single XMPP client (via a socket). It does the XMPP handshake and also implements the (old) Google notification protocol. """ _NOTIFIER_STANZA = ParseXml( """<iq from="" to="" id="" type=""> <not:getAll xmlns:not="google:notifier"> <Result xmlns=""/> </not:getAll> </iq> """) def __init__(self, sock, socket_map, connections, addr): """Starts up the xmpp connection. Args: sock: The socket to the client. socket_map: A map from sockets to their owning objects. connections: The set of handshake-completed connections. addr: The host/port of the client. 
""" asynchat.async_chat.__init__(self, sock) self.set_terminator(None) self._socket_map = socket_map self._socket_map[self.fileno()] = self self._connections = connections self._parser = StanzaParser(self) self._jid = None self._addr = addr addr_str = AddrString(self._addr) self._id_generator = IdGenerator(addr_str) self._handshake_task = ( HandshakeTask(self, self._id_generator, addr_str)) print 'Starting connection to %s' % self def __str__(self): if self._jid: return str(self._jid)
self.stanzas.append(stanza.toxml()) def testBasic(self): parser = xmppserver.StanzaParser(self) parser.FeedString('<foo') self.assertEqual(len(self.stanzas), 0) parser.FeedString('/><bar></bar>') self.assertEqual(self.stanzas[0], '<foo/>') self.assertEqual(self.stanzas[1], '<bar/>') def testStream(self): parser = xmppserver.StanzaParser(self) parser.FeedString('<stream') self.assertEqual(len(self.stanzas), 0) parser.FeedString(':stream foo="bar" xmlns:stream="baz">') self.assertEqual(self.stanzas[0], '<stream:stream foo="bar" xmlns:stream="baz"/>') def testNested(self): parser = xmppserver.StanzaParser(self) parser.FeedString('<foo') self.assertEqual(len(self.stanzas), 0) parser.FeedString(' bar="baz"') parser.FeedString('><baz/><blah>meh</blah></foo>') self.assertEqual(self.stanzas[0], '<foo bar="baz"><baz/><blah>meh</blah></foo>') class JidTest(unittest.TestCase): def testBasic(self): jid = xmppserver.Jid('foo', 'bar.com') self.assertEqual(str(jid), 'foo@bar.com') def testResource(self): jid = xmppserver.Jid('foo', 'bar.com', 'resource') self.assertEqual(str(jid), 'foo@bar.com/resource') def testGetBareJid(self): jid = xmppserver.Jid('foo', 'bar.com', 'resource') self.assertEqual(str(jid.GetBareJid()), 'foo@bar.com') class IdGeneratorTest(unittest.TestCase): def testBasic(self): id_generator = xmppserver.IdGenerator('foo') for i in xrange(0, 100): self.assertEqual('foo.%d' % i, id_generator.GetNextId()) class HandshakeTaskTest(unittest.TestCase): def setUp(self): self.data_received = 0 def SendData(self, _): self.data_received += 1 def SendStanza(self, _, unused=True): self.data_received += 1 def HandshakeDone(self, jid): self.jid = jid def DoHandshake(self, resource_prefix, resource, username, initial_stream_domain, auth_domain, auth_stream_domain): self.data_received = 0 id_generator = xmppserver.IdGenerator('foo') handshake_task = ( xmppserver.HandshakeTask(self, id_generator, resource_prefix)) stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>') stream_xml.setAttribute('to', initial_stream_domain) self.assertEqual(self.data_received, 0) handshake_task.FeedStanza(stream_xml) self.assertEqual(self.data_received, 2) if auth_domain: username_domain = '%s@%s' % (username, auth_domain)
def FeedStanza(self, stanza): """Inspects the given stanza and changes the handshake state if needed.
return AddrString(self._addr) def collect_incoming_data(self, data): self._parser.FeedString(data) def found_terminator(self): asynchat.async_chat.found_terminator(self) def handle_close(self): print "Closing connection to %s" % self self._connections.discard(self) del self._socket_map[self.fileno()] def FeedStanza(self, stanza): if self._handshake_task: self._handshake_task.FeedStanza(stanza) elif stanza.tagName == 'iq': self._HandleIq(stanza) else: raise UnexpectedXml(stanza) def HandshakeDone(self, jid): self._jid = jid self._handshake_task = None self._connections.add(self) print "Handshake done for %s" % self def _HandleIq(self, iq): if (iq.firstChild and iq.firstChild.namespaceURI == 'google:notifier'): iq_id = iq.getAttribute('id') self._HandleNotifierCommand(iq_id, iq.firstChild) elif iq.getAttribute('type') == 'result': pass else: raise UnexpectedXml(iq) def _HandleNotifierCommand(self, id, command_xml): command = command_xml.tagName if command == 'getAll': if not command_xml.getElementsByTagName('SubscribedServiceUrl'): raise UnexpectedXml(command_xml) self._SendNotifierStanza(id, 'result') elif command == 'set': SendNotification(self._connections) else: raise UnexpectedXml(command_xml) def _SendNotifierStanza(self, id, type): stanza = CloneXml(self._NOTIFIER_STANZA) stanza.setAttribute('from', str(self._jid.GetBareJid())) stanza.setAttribute('to', str(self._jid)) stanza.setAttribute('id', id) stanza.setAttribute('type', type) self.SendStanza(stanza) def SendStanza(self, stanza, unlink=True): """Sends a stanza to the client. Args: stanza: The stanza to send. unlink: Whether to unlink stanza after sending it. (Pass in False if stanza is a constant.) """ self.SendData(stanza.toxml()) if unlink: stanza.unlink() def SendData(self, data): """Sends raw data to the client. """ self.push(data.encode('ascii')) def SendNotification(self): """Sends a notification to the client.""" next_id = self._id_generator.GetNextId() self._SendNotifierStanza(next_id, 'set') def SendNotification(connections): """Sends a notification to all connections in the given sequence.""" for connection in connections: print 'Sending notification to %s' % connection connection.SendNotification() class XmppServer(asyncore.dispatcher): """The main XMPP server class. The XMPP server starts accepting connections on the given address and spawns off XmppConnection objects for each one. Use like so:
username_domain = username auth_string = base64.b64encode('\0%s\0bar' % username_domain) auth_xml = xmppserver.ParseXml('<auth>%s</auth>'% auth_string) handshake_task.FeedStanza(auth_xml) self.assertEqual(self.data_received, 3) stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>') stream_xml.setAttribute('to', auth_stream_domain) handshake_task.FeedStanza(stream_xml) self.assertEqual(self.data_received, 5) bind_xml = xmppserver.ParseXml( '<iq type="set"><bind><resource>%s</resource></bind></iq>' % resource) handshake_task.FeedStanza(bind_xml) self.assertEqual(self.data_received, 6) session_xml = xmppserver.ParseXml( '<iq type="set"><session></session></iq>') handshake_task.FeedStanza(session_xml) self.assertEqual(self.data_received, 7) self.assertEqual(self.jid.username, username) self.assertEqual(self.jid.domain, auth_stream_domain or auth_domain or initial_stream_domain) self.assertEqual(self.jid.resource, '%s.%s' % (resource_prefix, resource)) def testBasic(self): self.DoHandshake('resource_prefix', 'resource', 'foo', 'bar.com', 'baz.com', 'quux.com') def testDomainBehavior(self): self.DoHandshake('resource_prefix', 'resource', 'foo', 'bar.com', 'baz.com', 'quux.com') self.DoHandshake('resource_prefix', 'resource', 'foo', 'bar.com', 'baz.com', '') self.DoHandshake('resource_prefix', 'resource', 'foo', 'bar.com', '', '') self.DoHandshake('resource_prefix', 'resource', 'foo', '', '', '') class XmppConnectionTest(unittest.TestCase): def setUp(self): self.data = [] def fileno(self): return 0 def setblocking(self, int): pass def getpeername(self): return ('', 0) def send(self, data): self.data.append(data) pass def testBasic(self): connections = set() xmpp_connection = xmppserver.XmppConnection( self, {}, connections, ('', 0)) self.assertEqual(len(connections), 0) xmpp_connection.HandshakeDone(xmppserver.Jid('foo', 'bar')) self.assertEqual(len(connections), 1) self.assertEqual(len(self.data), 0) xmpp_connection.collect_incoming_data( '<iq><getAll xmlns="google:notifier">' '<SubscribedServiceUrl/></getAll></iq>') self.assertEqual(len(self.data), 1) xmpp_connection.collect_incoming_data('<iq type="result"/>') self.assertEqual(len(self.data), 1) xmpp_connection.collect_incoming_data( '<iq><set xmlns="google:notifier"/></iq>') self.assertEqual(len(self.data), 2) def SendUnexpectedStanza(): xmpp_connection.collect_incoming_data('<foo/>') self.assertRaises(xmppserver.UnexpectedXml, SendUnexpectedStanza) def SendUnexpectedNotifierCommand(): xmpp_connection.collect_incoming_data( '<iq><foo xmlns="google:notifier"/></iq>') self.assertRaises(xmppserver.UnexpectedXml, SendUnexpectedNotifierCommand) class XmppServerTest(unittest.TestCase): def fileno(self): return 0 def setblocking(self, int): pass def getpeername(self): return ('', 0) def testBasic(self): class FakeXmppServer(xmppserver.XmppServer): def accept(self2): return (self, ('', 0))
def __str__(self): if self._jid: return str(self._jid) else: return AddrString(self._addr)
xmpp_server = xmppserver.XmppServer(socket_map, ('127.0.0.1', 5222)) asyncore.loop(30.0, False, socket_map) """ def __init__(self, socket_map, addr): asyncore.dispatcher.__init__(self, None, socket_map) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.set_reuse_addr() self.bind(addr) self.listen(5) self._socket_map = socket_map self._socket_map[self.fileno()] = self self._connections = set() print 'XMPP server running at %s' % AddrString(addr) def handle_accept(self): (sock, addr) = self.accept() XmppConnection(sock, self._socket_map, self._connections, addr)
self.assertEqual(len(socket_map), 0) xmpp_server = FakeXmppServer(socket_map, ('', 0)) self.assertEqual(len(socket_map), 1) xmpp_server.handle_accept() self.assertEqual(len(socket_map), 2) if __name__ == '__main__': unittest.main()
def SendNotification(connections): """Sends a notification to all connections in the given sequence.""" for connection in connections: print 'Sending notification to %s' % connection connection.SendNotification()
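For context on what GetUserDomain above decodes: a SASL PLAIN payload is the base64 encoding of "authzid NUL authcid NUL password", which is why the handler splits on NUL bytes. A small standalone sketch in the same Python 2 style; the example credentials are made up:

import base64

# client sends base64("\0username[@domain]\0password")
payload = base64.b64encode('\0alice@example.com\0secret')
_, username_domain, password = base64.b64decode(payload).split('\0')
print username_domain  # alice@example.com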
def percentile(N, percent, key=lambda x:x): """ Find the percentile of a list of values.
def __init__(self): self.site='UNDEFINED' self.times=[]
@parameter N - is a list of values. Note N MUST BE already sorted. @parameter percent - a float value from 0.0 to 1.0. @parameter key - optional key function to compute value from each element of N.
class desktopui_PageCyclerTests(test.test): version = 1 results = {}
def percentile(N, percent, key=lambda x:x): """ Find the percentile of a list of values. @parameter N - is a list of values. Note N MUST BE already sorted. @parameter percent - a float value from 0.0 to 1.0. @parameter key - optional key function to compute value from each element of N. @return - the percentile of the values """ if not N: return None k = (len(N)-1) * percent f = math.floor(k) c = math.ceil(k) if f == c: return key(N[int(k)]) d0 = key(N[int(f)]) * (k-f) d1 = key(N[int(c)]) * (c-k) return d0+d1
@return - the percentile of the values """ if not N: return None k = (len(N)-1) * percent f = math.floor(k) c = math.ceil(k) if f == c: return key(N[int(k)]) d0 = key(N[int(f)]) * (k-f) d1 = key(N[int(c)]) * (c-k) return d0+d1
def run_page_cycler(self, gtest_filter = ''): assert(gtest_filter != ''), gtest_filter+' cannot be empty!' cmd = ('CR_SOURCE_ROOT=/home/chronos/chromium/src /home/chronos/' 'chromium/src/x86-generic_out/Release/page_cycler_tests' ' --gtest_filter=')+gtest_filter xcmd = site_ui.xcommand(cmd) logging.debug('Running: '+gtest_filter) output = utils.system_output(xcmd) pcrp = PageCyclerResultsParser() result = pcrp.parse_results(output) logging.debug(result) self.results[gtest_filter] = result
def percentile(N, percent, key=lambda x:x): """ Find the percentile of a list of values. @parameter N - is a list of values. Note N MUST BE already sorted. @parameter percent - a float value from 0.0 to 1.0. @parameter key - optional key function to compute value from each element of N. @return - the percentile of the values """ if not N: return None k = (len(N)-1) * percent f = math.floor(k) c = math.ceil(k) if f == c: return key(N[int(k)]) d0 = key(N[int(f)]) * (k-f) d1 = key(N[int(c)]) * (c-k) return d0+d1
def mean(numbers): assert(len(numbers) != 0), 'list should not be empty!' return sum(numbers)/len(numbers) class PageCyclerResultsParser: def parse_file(self, outfile = 'out.txt'): output = open(outfile).read() return self.parse_results(output) def parse_results(self, output = ''): median = functools.partial(percentile, percent=0.5) assert(output != ''), 'Output cannot be empty!' lines = output.split('\n') found = False token = '*RESULT times:' for index, line in enumerate(lines): if(line.startswith(token)): found = True break assert(found==True), token+' not found!?' timesline = lines[index] sitesline = lines[index-1] m = re.search('\[(.*?)\]', sitesline) sites = m.group(1).split(',') m = re.search('\[(.*?)\]', timesline) times = m.group(1).split(',') assert(len(times) % len(sites) == 0), 'Times not divisible by sites!' iterations = len(times)/len(sites) stList = [] for ii, site in enumerate(sites): st = SiteTimes() st.site = site for jj in range(0, iterations): mytime = float(times[jj*len(sites)+ii]) st.times.append(mytime) stList.append(st) medianList = [] totalTime = 0 for ii, st in enumerate(stList): sortedTimes=sorted(st.times) sortedTimes.pop() totalTime += mean(sortedTimes) return totalTime/len(stList)
def run_once(self): testNames=['PageCyclerTest.Alexa_usFile', 'PageCyclerTest.MozFile', 'PageCyclerTest.Intl1File', 'PageCyclerTest.Intl2File', 'PageCyclerTest.DhtmlFile', 'PageCyclerTest.Moz2File', 'PageCyclerTest.BloatFile', 'PageCyclerTest.DomFile', 'PageCyclerTest.MorejsFile', 'PageCyclerTest.MorejsnpFile'] for testName in testNames: self.run_page_cycler(testName) self.write_perf_keyval(self.results)
def mean(numbers): assert(len(numbers) != 0), 'list should not be empty!' return sum(numbers)/len(numbers)
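A usage sketch for the percentile() helper above, showing the median-as-partial pattern that parse_results relies on; it assumes the percentile() definition above is in scope and that the input list is already sorted:

import functools

median = functools.partial(percentile, percent=0.5)
times = sorted([3.0, 1.0, 2.0, 4.0])
print median(times)  # 2.5, linearly interpolated between the two middle values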
if match: content = callback + '(' + content + ')'
if match: content = callback + '(' + content.decode('utf-8') + ')'
def get(self): extURL = cgi.escape(self.request.get('extURL')) extMethod = cgi.escape(self.request.get('extMethod')) queryString = cgi.escape(self.request.query_string) queryDict = dict(cgi.parse_qsl(queryString)) callback = cgi.escape(self.request.get('_callback')) if queryString: error = 1 method = urlfetch.POST if extURL: del queryDict['extURL'] if extMethod: del queryDict['extMethod'] m = extMethod.lower() if m == 'put': method = urlfetch.PUT elif m == 'delete': method = urlfetch.DELETE elif m == 'get': method = urlfetch.GET # Huh?? elif m == 'head': method = urlfetch.HEAD # Oh, wait the minute... if len(queryDict): try: data = urllib.urlencode(queryDict) result = urlfetch.fetch(extURL, method=method, payload=data) if result.status_code == 200 or result.status_code == 201: error = 0 self.response.headers['Content-Type'] = 'application/javascript; charset=utf-8' content = result.content if callback: logging.info('Adding callback to JSON') exp = re.compile('^[A-Za-z_$][A-Za-z0-9._$]*?$') match = exp.match(callback) if match: content = callback + '(' + content + ')' self.response.out.write(content) except urlfetch.Error: logging.error('urlfetch error') error = 1 if error: self.response.set_status(400) self.response.out.write('Status: 400 Error parsing URL. There was an error processing your request: Error parsing URL.') else: self.response.out.write(""" <!DOCTYPE html> <title>jsonptunnel</title> <style> body{font-family: helvetica, arial, sans-serif} var{font-weight: bold; font-style: normal;} dt{display: list-item;} dl{margin-left: 40px;} </style> <h1>jsonptunnel</h1> <p>JSONP tunnel for letting you POST to remote services from your client-side JavaScript application and receive JSONP data.</p> <p><a href="http://labs.thinkminimo.com/jsonptunnel/#example">Try it out on the example form</a> and put <strong>http://jsonptunnel.appspot.com/</strong> as the jsonptunnel URL.</p> <p>Or try the following URL: <a href="/?callback=foo&amp;extURL=http://dipert.org/alan/calc.php&amp;num1=1&amp;num2=2">/?callback=foo&amp;extURL=http://dipert.org/alan/calc.php&amp;num1=1&amp;num2=2</a></p> <p>The parameters:</p> <dl> <dt><var>extURL</var></dt> <dd>Indicates the <em>external</em> web service URL. <strong>Required</strong>.</dd> <dt><var>extMethod</var> <em>(experimental)</em></dt> <dd>Indicates the HTTP method to use for the request, such as: <ul> <li>post <em>(default)</em></li> <li>put</li> <li>delete</li> </ul> </dd> <dt>...and any parameters to pass to the web service.</dt> </dl> <p>Inspired by <a href="http://ubergibson.com/">Alan Dipert</a>'s <a href="http://labs.thinkminimo.com/jsonptunnel/">jsonptunnel</a>. <a href="http://jsonptunnel.googlecode.com/">Google Code</a></p> """)
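A minimal sketch of the failure mode the .decode('utf-8') change above addresses: assuming the framework hands back the callback name as unicode, concatenating it with a UTF-8 byte string in Python 2 triggers an implicit ASCII decode that fails on non-ASCII payloads.

callback = u'foo'
content = '{"name": "Z\xc3\xbcrich"}'  # UTF-8 bytes for "Zürich"
# callback + '(' + content + ')' would raise UnicodeDecodeError here;
# decoding the payload first keeps the whole concatenation in unicode
wrapped = callback + '(' + content.decode('utf-8') + ')'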
entry += " '%s', '%s', '%s'))\n" % ( moduleName, 'deprecated', 'deprecated' )
if kind == "Library": entry += " '%s', '%s', '%s'), '%s')\n" % ( moduleName, 'deprecated', 'deprecated', moduleName.upper()) else: entry += " '%s', '%s', '%s'))\n" % ( moduleName, 'deprecated', 'deprecated')
def _appendToProjectsPy(self, moduleName, branchLocation, destination, template):
self.addPluginPath(os.path.join("/usr","lib", "python%d.%d" % (major, minor), "dist-packages", "openwns", "wrowser", "playgroundPlugins"))
self.addPluginPath(os.path.join("/usr","local","lib", "python%d.%d" % (major, minor), "dist-packages", "openwns", "wrowser", "playgroundPlugins"))
def __init__(self): """ Initialization of members. No other functionality. """ usage = "" usage += "The list below shows global available options.\n"
self.field, self.form, self),
self.field, self.form, self.content),
def update(self): value = zope.component.queryMultiAdapter( (self.context, self.request, self.widget, self.field, self.form, self), interfaces.IValue, name='message') if value is not None: self.message = value.get() else: self.message = self.createMessage()
iface = zope.interface.interface.InterfaceClass( 'IGeneratedForObject_%i' %hash(spec)) zope.interface.alsoProvides(spec, iface) spec = iface
ifaceName = 'IGeneratedForObject_%i' %hash(spec) existingInterfaces = [ i for i in zope.interface.directlyProvidedBy(spec) if i.__name__ == ifaceName ] if len(existingInterfaces) > 0: spec = existingInterfaces[0] else: iface = zope.interface.interface.InterfaceClass(ifaceName) zope.interface.alsoProvides(spec, iface) spec = iface
def getSpecification(spec, force=False): """Get the specification of the given object. If the given object is already a specification acceptable to the component architecture, it is simply returned. This is true for classes and specification objects (which includes interfaces). In case of instances, an interface is generated on the fly and tagged onto the object. Then the interface is returned as the specification. """ # If the specification is an instance, then we do some magic. if (force or (spec is not None and not zope.interface.interfaces.ISpecification.providedBy(spec) and not isinstance(spec, classTypes)) ): # Step 1: Create an interface iface = zope.interface.interface.InterfaceClass( 'IGeneratedForObject_%i' %hash(spec)) # Step 2: Directly-provide the interface on the specification zope.interface.alsoProvides(spec, iface) # Step 3: Make the new interface the specification for this instance spec = iface return spec
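A standalone sketch of the duplicate-interface issue the replacement logic above guards against: directlyProvidedBy() lists the interfaces already tacked onto an instance, so a repeat call can look up the previously generated interface by name instead of minting another. Assumes zope.interface is installed; the Content class is hypothetical.

import zope.interface

class Content(object):
    pass

obj = Content()
iface_name = 'IGeneratedForObject_%i' % hash(obj)
iface = zope.interface.interface.InterfaceClass(iface_name)
zope.interface.alsoProvides(obj, iface)

# a later call can recover the same generated interface instead of creating a new one
found = [i for i in zope.interface.directlyProvidedBy(obj) if i.__name__ == iface_name]
assert found[0] is iface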
return self.data.get(self.field.__name__, self.field.missing_value)
value = self.data.get(self.field.__name__, _marker) if value is _marker: raise AttributeError return value
def get(self): """See z3c.form.interfaces.IDataManager""" return self.data.get(self.field.__name__, self.field.missing_value)
baseuri+"
baseuri+prefix,
def _construct_ids(self, element, prefix, baseuri, skip_fragments=[], find_definitions = False): find_definitions_recursive = find_definitions counters = defaultdict(int) if isinstance(element, CompoundStructure): # Find concept definitions if isinstance(element, Paragraf): # check whether the first paragraph contains text that # suggests definitions follow # log.debug("Testing %r against some regexes" % element[0][0]) if self.re_definitions(element[0][0]): find_definitions = "normal" if (self.re_brottsdef(element[0][0]) or self.re_brottsdef_alt(element[0][0])): find_definitions = "brottsrubricering" if self.re_parantesdef(element[0][0]): find_definitions = "parantes" if self.re_loptextdef(element[0][0]): find_definitions = "loptext"
sets = [{'label':'Naive set 1' 'predicate',TEMP['naive1'], 'data',rs1},
sets = [{'label':'Naive set 1', 'predicate':TEMP['naive1'], 'data':rs1},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
'predicate', TEMP['naive2'], 'data',rs2},
'predicate':TEMP['naive2'], 'data':rs2},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
'predicate', TEMP['naive3'], 'data',rs3},
'predicate':TEMP['naive3'], 'data':rs3},
def prep_annotation_file(self,basefile): print "prep_annotation_file"
'predicate', TEMP['naive4'], 'data',rs4}]
'predicate':TEMP['naive4'], 'data':rs4}]
def prep_annotation_file(self,basefile): print "prep_annotation_file"
pass
def prep_annotation_file(self,basefile): print "prep_annotation_file"
re_urisegments = re.compile(r'([\w]+://[^/]+/[^\d]*)(\d+:(bih\. |N|)?\d+( s\.\d+|))
re_urisegments = re.compile(r'([\w]+://[^/]+/[^\d]*)(\d+:(bih\.[_ ]|N|)?\d+([_ ]s\.\d+|))
def __str__(self): return repr(self.value)
level = self.getFormData("level", None) if level is not None: if level=="top": query += ' AND dc_identifier:"http://purl.org/anzsrc/seo/ else: query += ' AND skos_broader:"%s"' % level
def __getSolrData(self): prefix = self.getSearchTerms() if prefix: query = '%(prefix)s OR %(prefix)s*' % { "prefix" : prefix } else: query = "*:*" req = SearchRequest(query) req.addParam("fq", 'item_type:"object"') req.addParam("fq", 'repository_type:"SEO"') req.setParam("fl", "score") req.setParam("sort", "score desc") req.setParam("start", self.getStartIndex()) req.setParam("rows", self.getItemsPerPage()) level = self.getFormData("level", None) if level is not None: if level=="top": #query += " AND skos_hasTopConcept:http*" query += ' AND dc_identifier:"http://purl.org/anzsrc/seo/#division"' else: query += ' AND skos_broader:"%s"' % level try: out = ByteArrayOutputStream() indexer = self.services.getIndexer() indexer.search(req, out) return JsonConfigHelper(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Failed to lookup '{}': {}", prefix, str(e)) return JsonConfigHelper()
def __init__(self, bundleDir, ipswDir, outDir, verbose): self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose def fileWithSuffix(self, filePath, suffix): if filePath.lower().endswith('.dmg'): filePath = filePath[:-4] suffix = suffix + '.dmg' return path.join(self.outDir, path.basename(filePath) + suffix) def decrypt_file(self, filePath, iv, key): decrypt_cmd = "xpwntool %s %s -iv %s -k %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.dec'), iv, key) if self.verbose: print "Unpacking: '%s'" % decrypt_cmd os.system(decrypt_cmd) def patch_file(self, filePath, patchFile): patch_cmd = "bspatch %s %s %s" % \ (self.fileWithSuffix(filePath, '.dec'), self.fileWithSuffix(filePath, '.dec.p'), path.join(self.bundleDir, patchFile)) if self.verbose: print "Patching: '%s'" % patch_cmd os.system(patch_cmd) def diff_llb(self, patch): filePath = patch [ 'File' ] patchFile = patch [ 'Patch' ] encrypt_cmd = "xpwntool %s %s -t %s -xn8824k -iv %s -k %s" % \ (self.fileWithSuffix(filePath, ".dec.ap"), self.fileWithSuffix(filePath, '.ap'), \ path.join(self.ipswDir, filePath) , patch['IV'], patch['Key']) if self.verbose: print "Encrypting LLB: '%s'" % encrypt_cmd os.system(encrypt_cmd) diff_cmd = "bsdiff %s %s %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.ap'), path.join(self.bundleDir, patchFile)) if self.verbose: print "Diffing LLB: '%s'" % diff_cmd os.system(diff_cmd) def diff_file(self, patch): filePath = patch['File'] patchFile = patch['Patch'] if path.basename(filePath).startswith('LLB'): self.diff_llb(patch) return if 'IV' in patch: orig_suffix = '.dec' ap_suffix = '.dec.ap' else: orig_suffix = '' ap_suffix = '.ap' diff_cmd = "bsdiff %s %s %s" % \ (self.fileWithSuffix(filePath, orig_suffix), self.fileWithSuffix(filePath, ap_suffix), path.join(self.bundleDir, patchFile)) if self.verbose: print "Diffing: '%s'" % diff_cmd os.system(diff_cmd) def decrypt_rootfs(self): key = self.infoPlist['RootFilesystemKey'] dmg = self.infoPlist['RootFilesystem'] vfdecrypt_cmd = "vfdecrypt -i %s -o %s -k %s" % \ (path.join(self.ipswDir, dmg), self.fileWithSuffix(dmg, '.dec'), key) if self.verbose: print "vfdecrypt: '%s'" % vfdecrypt_cmd os.system(vfdecrypt_cmd) mount_cmd = "hdiutil attach %s" % self.fileWithSuffix(dmg, '.dec') if self.verbose: print "hdiutil: '%s'" % mount_cmd os.system(mount_cmd) def fspatch_extract_callback(self, patch): if not 'Patch' in patch: return filePath = patch['File'] mountpoint = path.join('/Volumes', self.infoPlist['RootFilesystemMountVolume']) cp_cmd = "cp %s %s" % (path.join(mountpoint, filePath), self.fileWithSuffix(filePath, "")) if self.verbose: print "cp: '%s'" % cp_cmd os.system(cp_cmd) def mount_ramdisk(self): firmwarePatches = self.infoPlist['FirmwarePatches'] if not 'Restore Ramdisk' in firmwarePatches: return patch = firmwarePatches['Restore Ramdisk'] filePath = patch['File'] mount_cmd = "hdiutil attach %s" % self.fileWithSuffix(filePath, '.dec') if self.verbose: print "hdiutil: '%s'" % mount_cmd os.system(mount_cmd) def fwpatch_decrypt_callback(self, patch, patchKey): if not 'IV' in patch: return self.decrypt_file(patch['File'], patch['IV'], patch['Key']) if 'Patch' in patch: self.patch_file(patch['File'], patch['Patch']) def genpatch_create_callback(self, patch): if 'Patch' in patch: self.diff_file(patch) def fwpatch_create_callback(self, patch, patchKey): self.genpatch_create_callback(patch) def foreach_fwpatch(self, callback): firmwarePatches = self.infoPlist['FirmwarePatches'] for 
patchKey in firmwarePatches: patch = firmwarePatches[patchKey] callback(patch, patchKey) def foreach_fspatch(self, callback): filesystemPatches = self.infoPlist['FilesystemPatches'] for patchGroupKey in filesystemPatches: patchGroup = filesystemPatches[patchGroupKey] for patch in patchGroup: callback(patch) def rdpatch_extract_callback(self, patch): filePath = patch['File'] ramdiskKey = None for key in ['RestoreRamdiskMountVolume','RamdiskMountVolume']: if key in self.infoPlist: ramdiskKey = key break if not ramdiskKey: return mountpoint = path.join('/Volumes', self.infoPlist[ramdiskKey]) cp_cmd = "cp %s %s" % (path.join(mountpoint, filePath), self.fileWithSuffix(filePath, "")) if self.verbose: print "cp: '%s'" % cp_cmd os.system(cp_cmd) def foreach_rdpatch(self, callback): rdPatches = self.infoPlist['RamdiskPatches'] for rdKey in rdPatches: patch = rdPatches[rdKey] callback(patch) def umount_all(self): for key in ['RamdiskMountVolume', 'RestoreRamdiskMountVolume', 'RootFilesystemMountVolume']: if not key in self.infoPlist: continue mountpoint = path.join('/Volumes', self.infoPlist[key]) umount_cmd = "hdiutil detach %s" % mountpoint if self.verbose: print "Unmount: '%s'" % umount_cmd os.system(umount_cmd) def process_info_plist(self): self.infoPlist = plistlib.readPlist(path.join(self.bundleDir, 'Info.plist')) self.foreach_fwpatch(self.fwpatch_decrypt_callback) self.mount_ramdisk() self.foreach_rdpatch(self.rdpatch_extract_callback) self.decrypt_rootfs() self.foreach_fspatch(self.fspatch_extract_callback) self.umount_all() def create_patch_files(self): self.infoPlist = plistlib.readPlist(path.join(self.bundleDir, 'Info.plist')) self.foreach_fwpatch(self.fwpatch_create_callback) self.foreach_rdpatch(self.genpatch_create_callback) self.foreach_fspatch(self.genpatch_create_callback)
def __init__(self, bundleDir, ipswDir, outDir, verbose, x_opt): self.x_opt = x_opt self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose def fileWithSuffix(self, filePath, suffix): if filePath.lower().endswith('.dmg'): filePath = filePath[:-4] suffix = suffix + '.dmg' return path.join(self.outDir, path.basename(filePath) + suffix) def unpack_file(self, filePath): decrypt_cmd = "xpwntool %s %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.dec')) if self.verbose: print "Unpacking: '%s'" % decrypt_cmd os.system(decrypt_cmd) def decrypt_file(self, filePath, iv, key): decrypt_cmd = "xpwntool %s %s -iv %s -k %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.dec'), iv, key) if self.verbose: print "Decrypting: '%s'" % decrypt_cmd os.system(decrypt_cmd) def patch_file(self, filePath, patchFile): patch_cmd = "bspatch %s %s %s" % \ (self.fileWithSuffix(filePath, '.dec'), self.fileWithSuffix(filePath, '.dec.p'), path.join(self.bundleDir, patchFile)) if self.verbose: print "Patching: '%s'" % patch_cmd os.system(patch_cmd) def diff_llb(self, patch, x_opt): filePath = patch [ 'File' ] patchFile = patch [ 'Patch' ] encrypt_cmd = "xpwntool %s %s -t %s -x%s -iv %s -k %s" % \ (self.fileWithSuffix(filePath, ".dec.ap"), self.fileWithSuffix(filePath, '.ap'), \ path.join(self.ipswDir, filePath) , x_opt , patch['IV'], patch['Key']) if self.verbose: print "Encrypting LLB: '%s'" % encrypt_cmd os.system(encrypt_cmd) diff_cmd = "bsdiff %s %s %s" % \ (path.join(self.ipswDir, filePath), self.fileWithSuffix(filePath, '.ap'), path.join(self.bundleDir, patchFile)) if self.verbose: print "Diffing LLB: '%s'" % diff_cmd os.system(diff_cmd) def ldid(self, path): ldid_cmd = "ldid -s %s" % path if self.verbose: print "Pseudosigning: '%s'" % ldid_cmd os.system(ldid_cmd) def fuzzy_patch(self, patch, origPath, patchedPath): deltaFile = patch['Pattern'] fzp_cmd = "fuzzy_patcher --fuzz 80 --patch --orig %s --patched %s --delta %s" % \ (origPath, patchedPath, path.join(self.outDir, "_json", deltaFile + ".delta.json")) if self.verbose: print "Fuzzy patching: '%s'" % fzp_cmd os.system(fzp_cmd) if not path.basename(origPath).startswith('asr'): return self.ldid(patchedPath) def diff_file(self, patch, isFirmwarePatch): filePath = patch['File'] patchFile = patch['Patch'] if path.basename(filePath).startswith('LLB') and self.x_opt: self.diff_llb(patch, self.x_opt) return if isFirmwarePatch: orig_suffix = '.dec' ap_suffix = '.dec.ap' else: orig_suffix = '' ap_suffix = '.ap' origPath = self.fileWithSuffix(filePath, orig_suffix) patchedPath = self.fileWithSuffix(filePath, ap_suffix) if 'Pattern' in patch: self.fuzzy_patch(patch, origPath, patchedPath) diff_cmd = "bsdiff %s %s %s" % \ (origPath, patchedPath, path.join(self.bundleDir, patchFile)) if self.verbose: print "Diffing: '%s'" % diff_cmd os.system(diff_cmd) def decrypt_rootfs(self): key = self.infoPlist['RootFilesystemKey'] dmg = self.infoPlist['RootFilesystem'] vfdecrypt_cmd = "vfdecrypt -i %s -o %s -k %s" % \ (path.join(self.ipswDir, dmg), self.fileWithSuffix(dmg, '.dec'), key) if self.verbose: print "vfdecrypt: '%s'" % vfdecrypt_cmd os.system(vfdecrypt_cmd) mount_cmd = "hdiutil attach %s" % self.fileWithSuffix(dmg, '.dec') if self.verbose: print "hdiutil: '%s'" % mount_cmd os.system(mount_cmd) def fspatch_extract_callback(self, patch): if not 'Patch' in patch: return filePath = patch['File'] mountpoint = path.join('/Volumes', self.infoPlist['RootFilesystemMountVolume']) cp_cmd = "cp %s %s" % 
(path.join(mountpoint, filePath), self.fileWithSuffix(filePath, "")) if self.verbose: print "cp: '%s'" % cp_cmd os.system(cp_cmd) def mount_ramdisk(self): firmwarePatches = self.infoPlist['FirmwarePatches'] if not 'Restore Ramdisk' in firmwarePatches: return patch = firmwarePatches['Restore Ramdisk'] filePath = patch['File'] mount_cmd = "hdiutil attach %s" % self.fileWithSuffix(filePath, '.dec') if self.verbose: print "hdiutil: '%s'" % mount_cmd os.system(mount_cmd) def fwpatch_decrypt_callback(self, patch, patchKey): if not 'IV' in patch: self.unpack_file(patch['File']) else: self.decrypt_file(patch['File'], patch['IV'], patch['Key']) if 'Patch' in patch: self.patch_file(patch['File'], patch['Patch']) def genpatch_create_callback(self, patch): if 'Patch' in patch: self.diff_file(patch, isFirmwarePatch = False) def fwpatch_create_callback(self, patch, patchKey): if 'Patch' in patch: self.diff_file(patch, isFirmwarePatch = True) def foreach_fwpatch(self, callback): firmwarePatches = self.infoPlist['FirmwarePatches'] for patchKey in firmwarePatches: patch = firmwarePatches[patchKey] callback(patch, patchKey) def foreach_fspatch(self, callback): filesystemPatches = self.infoPlist['FilesystemPatches'] for patchGroupKey in filesystemPatches: patchGroup = filesystemPatches[patchGroupKey] for patch in patchGroup: callback(patch) def rdpatch_extract_callback(self, patch): filePath = patch['File'] ramdiskKey = None for key in ['RestoreRamdiskMountVolume','RamdiskMountVolume']: if key in self.infoPlist: ramdiskKey = key break if not ramdiskKey: return mountpoint = path.join('/Volumes', self.infoPlist[ramdiskKey]) cp_cmd = "cp %s %s" % (path.join(mountpoint, filePath), self.fileWithSuffix(filePath, "")) if self.verbose: print "cp: '%s'" % cp_cmd os.system(cp_cmd) def foreach_rdpatch(self, callback): rdPatches = self.infoPlist['RamdiskPatches'] for rdKey in rdPatches: patch = rdPatches[rdKey] callback(patch) def umount_all(self): for key in ['RamdiskMountVolume', 'RestoreRamdiskMountVolume', 'RootFilesystemMountVolume']: if not key in self.infoPlist: continue mountpoint = path.join('/Volumes', self.infoPlist[key]) umount_cmd = "hdiutil detach %s" % mountpoint if self.verbose: print "Unmount: '%s'" % umount_cmd os.system(umount_cmd) def process_info_plist(self): self.infoPlist = plistlib.readPlist(path.join(self.bundleDir, 'Info.plist')) self.foreach_fwpatch(self.fwpatch_decrypt_callback) self.mount_ramdisk() self.foreach_rdpatch(self.rdpatch_extract_callback) self.decrypt_rootfs() self.foreach_fspatch(self.fspatch_extract_callback) self.umount_all() def create_patch_files(self): self.infoPlist = plistlib.readPlist(path.join(self.bundleDir, 'Info.plist')) self.foreach_fwpatch(self.fwpatch_create_callback) self.foreach_rdpatch(self.genpatch_create_callback) self.foreach_fspatch(self.genpatch_create_callback)
def __init__(self, bundleDir, ipswDir, outDir, verbose): self.bundleDir = bundleDir self.ipswDir = ipswDir self.outDir = outDir self.verbose = verbose
parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="OUT_DIR") parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose mode") parser.add_option("-c", "--create", dest="create", action="store_true", default=False, help="Create patch files from work dir") (opts, args) = parser.parse_args() requiredOpts = ['bundle', 'ipsw', 'out'] for req in requiredOpts: if not opts.__dict__[req]: print "'%s' argument is mandatory!" % req exit(1) bundleParser = BundleParser( opts.bundle, opts.ipsw, opts.out, opts.verbose ) if opts.create: bundleParser.create_patch_files() else: bundleParser.process_info_plist()
parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="OUT_DIR") parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose mode") parser.add_option("-c", "--create", dest="create", action="store_true", default=False, help="Create patch files from work dir") parser.add_option("-x", "--llbexploit", dest="x_opt", default=None, help="Type of LLB exploit to use, n8824k or 24k") (opts, args) = parser.parse_args() requiredOpts = ['bundle', 'ipsw', 'out'] for req in requiredOpts: if not opts.__dict__[req]: print "'%s' argument is mandatory!" % req exit(1) bundleParser = BundleParser( opts.bundle, opts.ipsw, opts.out, opts.verbose, opts.x_opt) if opts.create: bundleParser.create_patch_files() else: bundleParser.process_info_plist()
def main(): parser = OptionParser() parser.add_option("-b", "--bundle", dest="bundle", help="Bundle directory to use", metavar="BUNDLE_DIR") parser.add_option("-i", "--ipsw", dest="ipsw", help="Unpacked IPSW directory", metavar="IPSW_DIR") parser.add_option("-o", "--out", dest="out", help="Output directory", metavar="OUT_DIR") parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Verbose mode") parser.add_option("-c", "--create", dest="create", action="store_true", default=False, help="Create patch files from work dir") (opts, args) = parser.parse_args() requiredOpts = ['bundle', 'ipsw', 'out'] for req in requiredOpts: if not opts.__dict__[req]: print "'%s' argument is mandatory!" % req exit(1) bundleParser = BundleParser( opts.bundle, opts.ipsw, opts.out, opts.verbose ) if opts.create: bundleParser.create_patch_files() else: bundleParser.process_info_plist()
kpatches = diff_kernel(sys.argv[3], sys.argv[4]) ibss = ibss_default_patches(sys.argv[1], sys.argv[2]) ibss_add_kpf(ibss, sys.argv[5]) ibss_add_kpatches(ibss, kpatches)
if len(sys.argv) < 6: print "Usage: ibss_patcher ibss_decrypted_orig ibss_out kernelcache_decrypted_orig kernelcache_decrypted_patched ibss_patchproc.bin" exit(1) kpatches = diff_kernel(sys.argv[3], sys.argv[4]) ibss = ibss_default_patches(sys.argv[1], sys.argv[2]) ibss_add_kpf(ibss, sys.argv[5]) ibss_add_kpatches(ibss, kpatches)
def byte_search(image, bytes): for i in range(0, len(image) - len(bytes), 2): if image[i:i+len(bytes)] == bytes: return i return -1
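A usage sketch for byte_search() above (assuming its definition is in scope); note that the scan only advances on even offsets, so a pattern starting at an odd offset is not found:

image = '\x00\x11\x22\x33\x44\x55'
print byte_search(image, '\x22\x33')  # 2
print byte_search(image, '\x11\x22')  # -1: the match starts at offset 1, which the 2-byte stride skips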
leftOfBox = xPt[ind-1]
leftOfBox = self.xPts[ind-1]
def value(self): "preserve volume during the binning" totalVol = 0 for ind,xPt,yPt in zip(range(self.numPts), self.xPts, self.yPts): #get leftOfBox if ind == 0: if self.leftSide < xPt: leftOfBox = self.leftSide else: leftOfBox = xPt else: leftOfBox = xPt[ind-1] #get rightOfBox if ind == (self.numPts-1): if self.rightSide > xPt: rightOfBox = self.rightSide else: rightOfBox = xPt else: rightOfBox=xPt[ind+1] boxLength = rightOfBox-leftOfBox boxVol = boxLength*yPt totalVol += boxVol totalHeight = totalVol/self.spacing return totalHeight
rightOfBox=xPt[ind+1]
rightOfBox = self.xPts[ind+1]
def value(self): "preserve volume during the binning" totalVol = 0 for ind,xPt,yPt in zip(range(self.numPts), self.xPts, self.yPts): #get leftOfBox if ind == 0: if self.leftSide < xPt: leftOfBox = self.leftSide else: leftOfBox = xPt else: leftOfBox = xPt[ind-1] #get rightOfBox if ind == (self.numPts-1): if self.rightSide > xPt: rightOfBox = self.rightSide else: rightOfBox = xPt else: rightOfBox=xPt[ind+1] boxLength = rightOfBox-leftOfBox boxVol = boxLength*yPt totalVol += boxVol totalHeight = totalVol/self.spacing return totalHeight
self.value = 0.0
def __init__(self, leftSide=None, spacing=None, numXPointsToBin=None): self.numXPointsToBin = numXPointsToBin self.value = 0.0 self.xPts = [] self.yPts = [] self.leftSide = leftSide self.spacing = spacing
if xPt >= currentBin.rightSide:
while xPt >= currentBin.rightSide:
def __init__(self, xDataP, yDataP, newBins=None, binValue = 'countPoints', format = 'columns'): xData = np.sort(xDataP) sortedInds = np.argsort(xDataP) yData = yDataP[sortedInds] self.bins=[] if type(newBins) is type(1): # this algorithm is for data that has already been binned and we're going over the bins to rebin import math leftOverPts, numXdataInBin = math.modf(len(xData)/len(newBins)) currentBin = Bin(numXPointsToBin = int(numXdataInBin)) for xPt,yPt in zip(xData,yData): if currentBin.getNumPts() >= numXdataInBin: currentBin.spacing = xPt - currentBin.xPts[0] self.bins.append(currentBin) currentBin = Bin(numXPointsToBin = int(numXdataInBin)) currentBin.xPts.append(xPt) if binValue=='countPoints': # then add together all the y axis values that fall within the new bin currentBin.value += yPt elif binValue=='averagePoints': #weight the average numPointsInBin = currentBin.getNumPts() currentBin.value = (numPointsInBin*currentBin.value + yPt)/(numPointsInBin+1) else: #assume newBins are equally spaced binCounter = 0 binSize = newBins[1] - newBins[0] currentBin = Bin(spacing = binSize, leftSide = newBins[binCounter]) for xPt,yPt in zip(xData,yData): if xPt >= currentBin.rightSide: self.bins.append(currentBin) binCounter += 1 currentBin = Bin(spacing = binSize, leftSide = newBins[binCounter]) currentBin.xPts.append(xPt) currentBin.yPts.append(yPt) # but when you plot, plot the y-axis value not at the x-axis pair, but at the midpoint between the x-axis pair # and the one up from it. Assume there is an additional x-axis point at the end with the same spacing as all the others.
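Why the if -> while change above matters: when a data point jumps past one or more empty bins, the current bin must be advanced repeatedly before the point can be assigned. A small standalone sketch of the loop shape:

new_bins = [0.0, 1.0, 2.0, 3.0]
bin_size = new_bins[1] - new_bins[0]

x = 2.5      # lands two bins past the first one
i = 0
while x >= new_bins[i] + bin_size:  # an 'if' here would only step once
    i += 1
print i  # 2, the index of the bin [2.0, 3.0)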
axisGrp.create_dataset('bin centers', data = NdArray(bcs.datatype(), bcs))
axisGrp.create_dataset('bin centers', data = bcs)
def onAxis(self, axesGrp, axis, index): #index: index of this axis in the axis array #we need to index that so that axis can be loaded #sequentially. mapper = axis._mapper type = types[mapper.__class__]
if isinstance(unit, int) or isinstance(unit, float):
if isinstance(unit, int) or isinstance(unit, float) or isinstance(unit, long):
def onUnit(self, unit): if isinstance(unit, int) or isinstance(unit, float): return unit return unit.tostring()
dst = WithProperties('public_html/binaries/' + remote_name + '.bz2', 'got_revision')
dst = WithProperties('public_html/binaries/' + remote_name, 'got_revision')
def addUploadBinariesStep(factory, binaries): for (local_name, remote_name) in binaries.items(): dst = WithProperties('public_html/binaries/' + remote_name + '.bz2', 'got_revision') factory.addStep(FileUpload(slavesrc=local_name, masterdest=dst, mode=0755))
return pretty_date(dt)
return dt
def get_date(s): dt = datetime.strptime(s, '%Y-%m-%d %H:%M:%S') dt -= timedelta(seconds=time.timezone) # sqlite seems to save at GMT... ata :P return pretty_date(dt) # found this online
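A standalone sketch of the timezone shift that get_date() above applies to sqlite timestamps: datetime('now') values are stored in UTC, and subtracting time.timezone expresses them in local time (ignoring DST, as the original does):

import time
from datetime import datetime, timedelta

s = '2011-06-01 12:00:00'  # as stored by sqlite datetime('now'), i.e. UTC
dt = datetime.strptime(s, '%Y-%m-%d %H:%M:%S') - timedelta(seconds=time.timezone)
print dt                   # the same instant expressed in local time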
self.conn = cherrypy.thread_data.db self.c = self.conn.cursor()
def get_conn(self): return cherrypy.thread_data.db def get_cur(self): return self.get_conn().cursor() def fetchvar(self, query, args=()): c = self.get_cur() c.execute(query, args) return c.fetchone()[0]
def connect(self, thread_index): cherrypy.thread_data.db = sqlite3.connect('minitwit.sqlite') self.conn = cherrypy.thread_data.db self.c = self.conn.cursor()
self.c.execute(query, args) return self.c.fetchone()
c = self.get_cur() c.execute(query, args) return c.fetchone()
def fetchone(self, query, args=()): self.c.execute(query, args) return self.c.fetchone()
self.c.execute(query, args) return self.c.fetchalll()
c = self.get_cur() c.execute(query, args) return c.fetchall()
def fetchall(self, query, args=()): self.c.execute(query, args) return self.c.fetchalll()
self.c.execute(query, args) self.conn.commit()
c = self.get_cur() c.execute(query, args) self.get_conn().commit()
def query(self, query, args=()): self.c.execute(query, args) self.conn.commit()
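A self-contained sketch of the cursor-per-call pattern the refactor above moves to, using an in-memory sqlite3 database:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('create table users (username text, password text)')
conn.execute('insert into users values (?, ?)', ('demo', 'x'))
conn.commit()

c = conn.cursor()  # fresh cursor per query, as in get_cur()
c.execute('select username from users where username = ?', ('demo',))
print c.fetchone()[0]  # demo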
conn = cherrypy.thread_data.db c = conn.cursor() c.execute("select rowid from users where username = ? and password = ?", (username, md5sum(password))) logged_in = c.fetchone()
logged_in = db.fetchone("select rowid from users where username = ? and password = ?", (username, md5sum(password)))
def login(self, username='', password='', redirect='/'): message = None if len(username) > 0 and len(password) > 0: conn = cherrypy.thread_data.db c = conn.cursor() c.execute("select rowid from users where username = ? and password = ?", (username, md5sum(password))) logged_in = c.fetchone() if logged_in is not None: cherrypy.session['logged_in'] = logged_in[0] raise cherrypy.HTTPRedirect(redirect) else: message = 'Invalid username/password' return templates.get_template('login.html').render(username=username, password=password, message=message)
conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select rowid, username from users where rowid = ?', (rowid,)) r = c.fetchone()
r = db.fetchone('select rowid, username from users where rowid = ?', (rowid,))
def get_logged_in(self): try: rowid = cherrypy.session.get('logged_in') conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select rowid, username from users where rowid = ?', (rowid,)) r = c.fetchone() return {'id': r[0], 'username': r[1]} except: return None
def default(self, id=None, text=None):
def default(self, id=None, text=None, last_update=None):
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
conn = cherrypy.thread_data.db c = conn.cursor()
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
c.execute('select rowid, text, date from posts where rowid = ?', (id,)) r = c.fetchone()
r = db.fetchone('select rowid, text, date from posts where rowid = ?', (id,))
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
c.execute('insert into posts values (?, ?, datetime("now"))', (logged_in['id'], text)) conn.commit()
db.query('insert into posts values (?, ?, datetime("now"))', (logged_in['id'], text))
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
try: c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') return json.dumps([{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r in c.fetchall()]) except: raise cherrypy.HTTPError(404)
if last_update is not None: last_update = int(last_update) / 1000 new_count = db.fetchvar('select count(*) from posts where strftime("%s", date) - ? > 0', (last_update,)) if int(new_count) == 0: return '[]' posts = db.fetchall('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') return json.dumps([{'id': r[0], 'text': r[1], 'date': pretty_date(get_date(r[2])), 'username': r[3]} for r in posts])
def default(self, id=None, text=None): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() m = cherrypy.request.method
c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r in c.fetchall()]
posts = db.fetchall('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': pretty_date(get_date(r[2])), 'username': r[3]} for r in posts]
def index(self): logged_in = Session().get_logged_in() conn = cherrypy.thread_data.db c = conn.cursor() c.execute('select posts.rowid, text, date, username from posts join users on posts.user = users.rowid order by date desc limit 10') posts = [{'id': r[0], 'text': r[1], 'date': get_date(r[2]), 'username': r[3]} for r in c.fetchall()] return templates.get_template('dashboard.html').render(logged_in=logged_in, posts=posts)
c.execute('insert into users values (?, ?)', username, md5sum(password)) conn.commit()
db.query('insert into users values (?, ?)', (username, md5sum(password)))
def register(self, username='', password='', conf_password=''): message = None if len(username) > 0 and len(password) > 0 and password == conf_password: c.execute('insert into users values (?, ?)', username, md5sum(password)) conn.commit() raise cherrypy.HTTPRedirect('/session/login') elif password != conf_password: message = "Passwords don't match" return templates.get_template('register.html').render(username=username, password=password, conf_password=conf_password, message=message)
conn = cherrypy.thread_data.db c = conn.cursor() c.execute("drop table if exists users") c.execute("drop table if exists posts") c.execute("create table users (username text, password text)") c.execute("create unique index username on users (username)") c.execute("create table posts (user int, text text, date text)") c.execute("create index user on posts (user)") c.execute("insert into users values (?, ?)", ('demo', md5sum('demo'))) c.execute("insert into posts values (?, ?, datetime('now'))", (1, 'Hello world')) conn.commit()
db.query("drop table if exists users") db.query("drop table if exists posts") db.query("create table users (username text, password text)") db.query("create unique index username on users (username)") db.query("create table posts (user int, text text, date text)") db.query("create index user on posts (user)") db.query("insert into users values (?, ?)", ('demo', md5sum('demo'))) db.query("insert into posts values (?, ?, datetime('now'))", (1, 'Hello world'))
def install(self): conn = cherrypy.thread_data.db c = conn.cursor() c.execute("drop table if exists users") c.execute("drop table if exists posts") c.execute("create table users (username text, password text)") c.execute("create unique index username on users (username)") c.execute("create table posts (user int, text text, date text)") c.execute("create index user on posts (user)") c.execute("insert into users values (?, ?)", ('demo', md5sum('demo'))) c.execute("insert into posts values (?, ?, datetime('now'))", (1, 'Hello world')) conn.commit() return "Tables created!"
reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList]))
reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList)))
def getRequirements(self, jobNum): reqs = Module.getRequirements(self, jobNum) if self.dataSplitter != None: reqs.append((WMS.STORAGE, self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList])) return reqs
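The [] -> .get() change above trades a KeyError for a None when a splitting record has no SEList entry. A tiny illustration with a hypothetical record:

info = {'NEvents': 1000}  # hypothetical splitting record without an SEList key
print info.get('SEList')  # None
# info['SEList'] would raise KeyError instead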
return self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList] != []
return self.dataSplitter.getSplitInfo(jobNum).get(DataSplitter.SEList) != []
def canSubmit(self, jobNum): if self.checkSE and (self.dataSplitter != None): return self.dataSplitter.getSplitInfo(jobNum)[DataSplitter.SEList] != [] return True