function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
sequence
def __init__(self, columns):
    """Wrap a list of column records and precompute the display headers."""
    self.columns = columns
    self.pretty_tbl_cols = ["Table", "Column Name", "Type"]
    # Only show a Schema column when at least one column carries a schema.
    self.use_schema = any(col.schema for col in columns)
    if self.use_schema:
        self.pretty_tbl_cols.insert(0, "Schema")
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]
def _tablify(self):
    """Render the wrapped columns as a left-aligned PrettyTable."""
    table = PrettyTable(self.pretty_tbl_cols)
    for header in self.pretty_tbl_cols:
        table.align[header] = "l"
    for column in self.columns:
        row = [column.table, column.name, column.type]
        if self.use_schema:
            row.insert(0, column.schema)
        table.add_row(row)
    return table
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]
def _repr_html_(self):
    """IPython rich-display hook: render the columns as an HTML table."""
    tbl = self._tablify()
    return tbl.get_html_string()
yhat/db.py
[ 1220, 116, 1220, 33, 1414337817 ]
def cmp(x, y):
    """Python-2-style three-way comparison: -1 if x<y, 1 if x>y, else 0."""
    # Bool subtraction yields exactly -1, 0 or 1.
    return (x > y) - (x < y)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def describe_filter_method(filter_by):
    """Return a human-readable description of a bag-filter argument."""
    if callable(filter_by):
        return "matching a function called {}".format(filter_by.__name__)
    if isinstance(filter_by, six.string_types):
        return "containing the string {!r}".format(filter_by)
    if have_ham and isinstance(filter_by, hamcrest.matcher.Matcher):
        return "containing "+str(filter_by)
    if isinstance(filter_by, REGEX_PATTERN_TYPE):
        return "matching the regex {!r}".format(filter_by.pattern)
    # Unknown filter type - should not normally happen.
    return "which we're surprised we found at all"
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __init__(self, value, x, y, table, properties=None):
    """A single cell: a value plus its (x, y) position within `table`."""
    self.value = value      # cell contents, of appropriate type
    self.x = x              # column number
    self.y = y              # row number
    self.table = table
    # Never share a mutable default dict between cells.
    self.properties = {} if properties is None else properties
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __eq__(self, rhs):
    """See _XYCell.__hash__ for equality conditions"""
    return hash(rhs) == hash(self)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __repr__(self):
    """Debug representation showing the value and (x, y) coordinates."""
    return "_XYCell(%r, %r, %r)" % (self.value, self.x, self.y)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def lookup(self, header_bag, direction, strict=False):
    """Find the header cell in `header_bag` for this (value) cell.

    `direction` is a (dx, dy) unit vector giving the axis to search
    along (e.g. up for column headers).  With strict=True, only header
    cells sharing this cell's row/column perpendicular to the search
    axis are considered.

    Raises LookupConfusionError when two headers are equally good,
    NoLookupError when no header lies in that direction.
    """
    def mult(cell):
        # Project a cell's coordinates onto the search axis.
        return cell.x * direction[0] + cell.y * direction[1]
    def same_row_col(a, b, direction):
        # True when a and b share the coordinate perpendicular to `direction`.
        return (a.x - b.x == 0 and direction[0] == 0) or \
               (a.y - b.y == 0 and direction[1] == 0)
    best_cell = None
    second_best_cell = None
    for target_cell in header_bag.unordered_cells:
        # Candidate must lie at-or-beyond this cell along the axis,
        # and be no further away than the best one found so far.
        if mult(self) <= mult(target_cell):
            if not best_cell or mult(target_cell) <= mult(best_cell):
                if not strict or same_row_col(self, target_cell, direction):
                    second_best_cell = best_cell
                    best_cell = target_cell
    # A tie between the two closest candidates is ambiguous.
    if second_best_cell and mult(best_cell) == mult(second_best_cell):
        raise LookupConfusionError("{!r} is as good as {!r} for {!r}".format(
            best_cell, second_best_cell, self))
    if best_cell is None:
        raise NoLookupError("No lookup for {!r}".format(self))
    return best_cell
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def junction_coord(cells, direction=DOWN):
    """Under the hood: given two cells and a favoured direction, get the
    position of the cell with the column of one and the row of the other:

        A---->+
        |     ^
        |     |
        v     |
        *<----B

    Both + and * are candidates for the junction of A and B - we take
    the one furthest down by default (specified by direction).

    >>> cells_dr = (_XYCell(0,1,2,None), _XYCell(0,3,4,None))
    >>> junction_coord(cells_dr, DOWN)
    (1, 4)
    >>> junction_coord(cells_dr, UP)
    (3, 2)
    >>> junction_coord(cells_dr, LEFT)
    (1, 4)
    >>> junction_coord(cells_dr, RIGHT)
    (3, 2)
    >>> cells_tr = (_XYCell(0,1,4,None), _XYCell(0,3,2,None))
    >>> junction_coord(cells_tr, DOWN)
    (3, 4)
    >>> junction_coord(cells_tr, UP)
    (1, 2)
    >>> junction_coord(cells_tr, LEFT)
    (1, 2)
    >>> junction_coord(cells_tr, RIGHT)
    (3, 4)
    """
    # The two candidate junction coordinates: (A.x, B.y) and (B.x, A.y).
    new_cells = (
        (cells[0].x, cells[1].y),
        (cells[1].x, cells[0].y)
    )
    # `direction` has exactly one non-zero component; compare the
    # candidates along that axis and keep the one on the favoured side.
    for index, value in enumerate(direction):
        if value == 0:
            continue
        if cmp(new_cells[0][index], new_cells[1][index]) == value:
            return new_cells[0]
        else:
            return new_cells[1]
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def shift(self, x=0, y=0):
    """Get the cell which is offset from this cell by x columns, y rows.

    Also accepts a single (dx, dy) pair in place of separate x/y."""
    if isinstance(x, int):
        return self.table.get_at(self.x + x, self.y + y)._cell
    # x is a (dx, dy) pair; y must not also be supplied.
    assert y == 0, \
        "_XYCell.shift: x=%r not integer and y=%r specified" % (x, y)
    return self.shift(x[0], x[1])
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def pprint(self, *args, **kwargs):
    """Pretty-print this bag; delegates to the contrib_excel helper."""
    return contrib_excel.pprint(self, *args, **kwargs)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def filter_one(self, filter_by):
    """Filter down to a single cell; delegates to the contrib_excel helper."""
    return contrib_excel.filter_one(self, filter_by)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __init__(self, table):
    """A bag of cells all belonging to the one `table`."""
    self.__store = set()
    self.table = table
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __eq__(self, other):
    """Compare two bags: they are equal if:
    * their table are the same table (object)
    * they contain the same set of cells"""
    if not isinstance(other, CoreBag):
        return False
    same_table = self.table is other.table
    return same_table and self.__store == other.__store
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __repr__(self):
    """Show the underlying cell set directly."""
    return repr(self.__store)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def singleton(cls, cell, table):
    """ Construct a bag with one cell in it """
    new_bag = cls(table=table)
    new_bag.add(cell)
    return new_bag
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def unordered(self):
    """ Obtain an unordered iterator over this bag. iter(bag) is sorted
        on demand, and therefore inefficient if being done repeatedly
        where order does not matter. """
    return (Bag.singleton(cell, table=self.table) for cell in self.__store)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def unordered_cells(self):
    """Like `unordered`, but yields raw _XYCells rather than Bags."""
    return iter(self.__store)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def yx(cell):
    """Sort key for row-major order: top-to-bottom, then left-to-right."""
    return cell.y, cell.x
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __sub__(self, rhs):
    """Bags quack like sets. Implements - operator."""
    return self.difference(rhs)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __or__(self, rhs):
    """Bags quack like sets. Implements | operator.
    For mathematical purity, + (__add__) isn't appropriate"""
    return self.union(rhs)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __and__(self, rhs):
    """Bags quack like sets. Implements & operator via intersection."""
    return self.intersection(rhs)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def select(self, function):
    """Select cells from this bag's table based on the cells in this bag.

    e.g. bag.select(lambda bag_cell, table_cell:
                        bag_cell.y == table_cell.y
                        and bag_cell.value == table_cell.value)
    would give cells in the table with the same name on the same row
    as a cell in the bag."""
    return self.table.select_other(function, self)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def filter(self, filter_by):
    """
    Returns a new bag containing only cells which match
    the filter_by predicate.

    filter_by can be:
    a) a callable, which takes a cell as a parameter
       and returns True if the cell should be returned,
       such as `lambda cell: cell.value == 'dog'`
    b) a string, to match exactly: `u'dog'`
    c) a hamcrest match rule: `hamcrest.equal_to("dog")`
       (requires hamcrest to be available)
    d) a compiled regex: `re.compile("dog")`
    """
    if callable(filter_by):
        return self._filter_internal(filter_by)
    elif isinstance(filter_by, six.string_types):
        # Exact match against the whitespace-stripped text of the cell.
        return self._filter_internal(lambda cell: six.text_type(cell.value).strip() == filter_by)
    elif have_ham and isinstance(filter_by, hamcrest.matcher.Matcher):
        return self._filter_internal(lambda cell: filter_by.matches(cell.value))
    elif isinstance(filter_by, REGEX_PATTERN_TYPE):
        # NOTE: re.match anchors at the start of the cell text only.
        return self._filter_internal(
            lambda cell: re.match(filter_by, six.text_type(cell.value)))
    else:
        raise ValueError("filter_by must be function, hamcrest filter, compiled regex or string.")
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def assert_one(self, message="assert_one() : {} cells in bag, not 1"):
    """Chainable: raise an error if the bag contains 0 or 2+ cells.
    Otherwise returns the original (singleton) bag unchanged."""
    count = len(self.__store)
    if count == 1:
        return self
    if count == 0:
        raise NoCellsAssertionError(message.format(count))
    raise MultipleCellsAssertionError(message.format(count))
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def _cell(self):
    """Under the hood: get the cell inside a singleton bag.
    It's an error for it to not contain precisely one cell.

    Raises XYPathError when the bag is not a singleton.
    """
    try:
        xycell = list(self.assert_one().__store)[0]
    except AssertionError:
        # NOTE(review): assert_one raises NoCells-/MultipleCellsAssertionError;
        # this handler fires only if those subclass AssertionError - confirm.
        l = len(list(self.__store))
        raise XYPathError("Can't use multicell bag as cell: (len %r)" % l)
    else:
        assert isinstance(xycell, _XYCell)
        return xycell
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def value(self):
    """The value of the single cell in this (singleton) bag."""
    cell = self._cell
    return cell.value
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def x(self):
    """Column number of the single cell in this (singleton) bag."""
    cell = self._cell
    return cell.x
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def y(self):
    """Row number of the single cell in this (singleton) bag."""
    cell = self._cell
    return cell.y
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def properties(self):
    """Properties dict of the single cell in this (singleton) bag."""
    cell = self._cell
    return cell.properties
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def from_list(cells):
    """ Make a non-bag iterable of cells into a Bag. Some magic may be
        lost, especially if it's zero length.
        TODO: This should probably be part of the core __init__ class.
        TODO: Don't do a piece-by-piece insertion, just slap the whole
              listed iterable in, because this is slow. """
    bag = Bag(table=None)
    for position, cell_bag in enumerate(cells):
        bag.add(cell_bag._cell)
        if position == 0:
            # Adopt the first cell's table...
            bag.table = cell_bag.table
        else:
            # ...and insist that every later cell agrees with it.
            assert bag.table == cell_bag.table
    return bag
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def fill(self, direction, stop_before=None):
    """Select all of the table's cells lying in `direction` from each
    cell in this bag (e.g. fill DOWN from a header selects the values
    underneath it).

    Should give the same output as _fill, except that diagonal
    directions and their stop_before handling are delegated to _fill.
    Twenty times faster than _fill in test_ravel.

    stop_before, if given, truncates the result before the first cell
    for which stop_before(cell) is true.
    """
    # BUG FIX: this tuple previously read (UP_RIGHT, DOWN_RIGHT, UP_LEFT,
    # UP_RIGHT) - UP_RIGHT twice and DOWN_LEFT missing - so a DOWN_LEFT
    # fill fell through to the cardinal-only ValueError below.
    if direction in (UP_RIGHT, DOWN_RIGHT, UP_LEFT, DOWN_LEFT):
        return self._fill(direction, stop_before)

    def what_to_get(cell):
        """converts bag coordinates into thing to pass to get_at"""
        cell_coord = (cell.x, cell.y)
        retval = []
        for cell_coord, direction_coord in zip(cell_coord, direction):
            if direction_coord != 0:
                retval.append(None)
            else:
                retval.append(cell_coord)
        return tuple(retval)  # TODO yuck

    if direction not in (UP, RIGHT, DOWN, LEFT):
        raise ValueError("Must be a cardinal direction!")

    # Gather only the rows/columns our cells live in; selecting against
    # this small bag instead of the whole table is what makes fill fast.
    small_table = None
    for cell in self.unordered_cells:
        got_rowcol = self.table.get_at(*what_to_get(cell))
        if small_table:
            small_table = small_table.union(got_rowcol)
        else:
            small_table = got_rowcol
    if small_table is None:
        small_table = Bag(table=self.table)

    # now we use the small_table as if it was the table.
    (left_right, up_down) = direction
    bag = small_table.select_other(
        lambda table, bag: cmp(table.x, bag.x) == left_right
        and cmp(table.y, bag.y) == up_down,
        self
    )
    if stop_before is not None:
        return bag.stop_before(stop_before)
    return bag
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def _fill(self, direction, stop_before=None):
    """
    If the bag contains only one cell, select all cells in the
    direction given, excluding the original cell. For example, from a
    column heading cell, you can "fill down" to get all the values
    underneath it.

    If you provide a stop_before function, it will be called on each
    cell as a stop condition. For example, if you provide a stop_before
    function which tests cell.value for an empty string. This would
    stop the fill function before it reaches the bottom of the sheet,
    for example.
    """
    # NOTE(review): this raise is unconditional, so everything below it
    # is dead code retained for reference.
    raise DeprecationWarning("2D fill is deprecated. Yell if you need it.")
    if direction not in (UP, RIGHT, DOWN, LEFT, UP_RIGHT, DOWN_RIGHT,
                         UP_LEFT, DOWN_LEFT):
        raise ValueError("Invalid direction! Use one of UP, RIGHT, "
                         "DOWN_RIGHT etc")
    (left_right, up_down) = direction
    bag = self.select(
        lambda table, bag: cmp(table.x, bag.x) == left_right
        and cmp(table.y, bag.y) == up_down
    )
    if stop_before is not None:
        # NOTE(PMF): stop_before is limited to singleton bags, in the DOWN
        # or RIGHT direction. This isn't ideal, but with the above "magic"
        # cmp code I can't think of an elegant general way of doing this. I
        # also can't imagine what it means to run fill in multiple
        # directions, or with non singleton bags. TODO: Constrain?
        if direction not in (DOWN, RIGHT):
            raise ValueError("Oops, stop_before only works down or right!")
        self.assert_one("You can't use stop_before for bags with more than"
                        " one cell inside.")
        return Bag.from_list(list(
            takewhile(lambda c: not stop_before(c), bag)))
    return bag
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def waffle(self, other, *args, **kwargs):
    """Return a bag of the junction cells of this bag crossed with `other`."""
    result = Bag(table=self.table)
    for selfbag, otherbag, junction_cell in self.junction(other, *args, **kwargs):
        result.add(junction_cell._cell)
    return result
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def extrude(self, dx, dy):
    """
    Extrude all cells in the bag by (dx, dy).

    For example, given the bag with a cell at (0, 0):
        {(0, 0)}
    .extrude(2, 0) gives the bag with the cells (to the right):
        {(0, 0), (1, 0), (2, 0)}
    .extrude(0, -2) gives the bag with the cells (up):
        {(0, 0), (0, -1), (0, -2)}
    """
    def _steps(delta):
        # 0..delta inclusive, stepping towards delta's sign.
        step = -1 if delta < 0 else 1
        return list(range(0, delta + step, step))

    result = Bag(table=self.table)
    for cell in self.unordered_cells:
        for i, j in product(_steps(dx), _steps(dy)):
            result.add(self.table.get_at(cell.x + i, cell.y + j)._cell)
    return result
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def same_col(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap all_x = set() for cell in bag.unordered_cells: all_x.add(cell.x) return self.filter(lambda c: c.x in all_x)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def __init__(self, name=""):
    """An empty table, with per-row and per-column cell indexes."""
    super(Table, self).__init__(table=self)
    # Coordinate -> Bag-of-cells indexes for fast row/column lookup.
    self._x_index = defaultdict(lambda: Bag(self))
    self._y_index = defaultdict(lambda: Bag(self))
    # Highest coordinates seen so far; -1 means "no cells yet".
    self._max_x = -1
    self._max_y = -1
    self.sheet = None
    self.name = name
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def rows(self):
    """Get bags containing each row's cells, in order"""
    for row_num in range(self._max_y + 1):  # inclusive of the last row
        yield self._y_index[row_num]
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def col(self, column):
    """Get a column's bag, by Excel letter ("A") or zero-based number."""
    if not isinstance(column, six.string_types):
        assert isinstance(column, int)
        return self._x_index[column]
    # Letter form: convert to a zero-based index and recurse.
    c_num = contrib_excel.excel_column_number(column, index=0)
    return self.col(c_num)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def get_at(self, x=None, y=None):
    """Directly get a singleton bag via indices. Faster than Bag.filter"""
    # .get() avoids inserting new empty Bags into the defaultdict index
    # stores when a non-existent coordinate is requested.
    assert isinstance(x, int) or x is None, "get_at takes integers (got {!r})".format(x)
    assert isinstance(y, int) or y is None, "get_at takes integers (got {!r})".format(y)
    if x is None and y is None:
        raise TypeError('get_at requires at least one x or y value')
    if x is None:
        return self._y_index.get(y, Bag(self))
    if y is None:
        return self._x_index.get(x, Bag(self))
    # Both given: take the row, then narrow to the requested column.
    row = self._y_index.get(y, Bag(self))
    return row.filter(lambda cell: cell.x == x)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def from_filename(filename, table_name=None, table_index=None):
    """Wrapper around from_file_object to handle extension extraction"""
    # NOTE: table_name is a messytables table name
    extension = os.path.splitext(filename)[1].strip('.')
    with open(filename, 'rb') as f:
        return Table.from_file_object(f, extension,
                                      table_name=table_name,
                                      table_index=table_index)
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def from_file_object(fobj, extension='', table_name=None, table_index=None):
    """Load table from file object. You must specify exactly one of a
    table's name or position number; if you don't know these, try
    from_messy."""
    # NOTE this is a messytables table name
    have_name = table_name is not None
    have_index = table_index is not None
    if have_name == have_index:
        raise TypeError("Must give exactly one of table_name, table_index")
    table_set = messytables.any.any_tableset(fobj, extension=extension)
    if have_name:
        return Table.from_messy(table_set[table_name])
    return Table.from_messy(table_set.tables[table_index])
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def from_messy(messy_rowset):
    """Import a rowset (table) from messytables, e.g. to work with
    each table in turn:

        tables = messytables.any.any_tableset(fobj)
        for mt_table in tables:
            xy_table = xypath.Table.from_messy(mt_table)
            ..."""
    assert isinstance(messy_rowset, messytables.core.RowSet),\
        "Expected a RowSet, got a %r" % type(messy_rowset)
    # Each messytables cell supplies both a value and a properties dict.
    new_table = Table.from_iterable(
        messy_rowset,
        value_func=lambda cell: cell.value,
        properties_func=lambda cell: cell.properties,
        name=messy_rowset.name)
    # Carry the source sheet across when the rowset exposes one.
    if hasattr(messy_rowset, 'sheet'):
        new_table.sheet = messy_rowset.sheet
    return new_table
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def from_iterable(table, value_func=lambda cell: cell,
                  properties_func=lambda cell: {}, name=None):
    """Make a table from a pythonic table structure. The table must be
    an iterable which returns rows (in top-to-bottom order), which in
    turn are iterables which returns cells (in left-to-right order).

    value_func and properties_func specify how the cell maps onto an
    _XYCell's value and properties. The defaults assume that you have
    a straight-forward list of lists of values."""
    new_table = Table(name=name)
    for y, row in enumerate(table):
        for x, cell in enumerate(row):
            xy_cell = _XYCell(
                value_func(cell), x, y, new_table, properties_func(cell))
            new_table.add(xy_cell)
    return new_table
scraperwiki/xypath
[ 44, 6, 44, 8, 1369323312 ]
def _environ_path(name):
    """Split an environment variable into a path-like list elements"""
    value = os.environ.get(name)
    if value is None:
        return []
    return value.split(":")
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def __init__(self, path):
    """Load the library at `path` under the cdecl calling convention."""
    super(LibraryLoader.Lookup, self).__init__()
    self.access = dict(cdecl=ctypes.CDLL(path, self.mode))
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def has(self, name, calling_convention="cdecl"):
    """Return True if this given calling convention finds the given 'name'"""
    try:
        library = self.access[calling_convention]
    except KeyError:
        # Calling convention not loaded at all.
        return False
    return hasattr(library, name)
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def __init__(self):
    """Start with no extra user-supplied search directories."""
    self.other_dirs = []
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def getpaths(self, libname):
    """Yield candidate paths where the library might be found."""
    if os.path.isabs(libname):
        # Absolute path given: nothing to search for.
        yield libname
        return
    # Search through a prioritized series of locations for the library.
    # 1. Any specific directories identified by the user.
    for dir_i in self.other_dirs:
        for fmt in self.name_formats:
            # dir_i should be absolute already
            yield os.path.join(dir_i, fmt % libname)
    # 2. The directory where the generated python interface is stored.
    here = os.path.dirname(__file__)
    for fmt in self.name_formats:
        yield os.path.abspath(os.path.join(here, fmt % libname))
    # 3. Whatever the ctypes tooling can locate.
    for fmt in self.name_formats:
        path = ctypes.util.find_library(fmt % libname)
        if path:
            yield path
    # 4. All paths identified as platform-specific lib paths.
    for path in self.getplatformpaths(libname):
        yield path
    # 5. Finally, the user's current working directory.
    for fmt in self.name_formats:
        yield os.path.abspath(os.path.join(os.path.curdir, fmt % libname))
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def getplatformpaths(self, libname):
    """Yield a candidate path in every platform search directory."""
    # A libname containing the path separator is taken verbatim;
    # otherwise expand it through each of the name format patterns.
    if os.path.pathsep in libname:
        names = [libname]
    else:
        names = [fmt % libname for fmt in self.name_formats]
    for directory in self.getdirs(libname):
        for candidate in names:
            yield os.path.join(directory, candidate)
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def getdirs(libname):
    """Implements the dylib search as specified in Apple documentation:

    http://developer.apple.com/documentation/DeveloperTools/Conceptual/
    DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html

    Before commencing the standard search, the method first checks
    the bundle's ``Frameworks`` directory if the application is running
    within a bundle (OS X .app).
    """
    # Fallback path defaults to the user's lib dir plus the system dirs.
    dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
    if not dyld_fallback_library_path:
        dyld_fallback_library_path = [os.path.expanduser("~/lib"),
                                      "/usr/local/lib", "/usr/lib"]
    dirs = []
    # A slash in the name means a partial path was given; only DYLD
    # paths apply then. Otherwise LD paths are searched first.
    if "/" in libname:
        dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
    else:
        dirs.extend(_environ_path("LD_LIBRARY_PATH"))
        dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
    # py2app/frozen bundle: also search the bundle's Frameworks dir.
    if hasattr(sys, "frozen") and getattr(sys, "frozen") == "macosx_app":
        dirs.append(os.path.join(os.environ["RESOURCEPATH"], "..", "Frameworks"))
    dirs.extend(dyld_fallback_library_path)
    return dirs
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def __init__(self):
    """A dict subclass that tracks an insertion counter."""
    dict.__init__(self)
    self.order = 0  # insertion rank counter (presumably bumped on add)
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def extend(self, directories):
    """Add a list of directories to our set"""
    for each in directories:
        self.add(each)
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def _get_ld_so_conf_dirs(self, conf, dirs):
    """
    Recursive function to help parse all ld.so.conf files, including
    proper handling of the `include` directive.
    """
    try:
        with open(conf) as fileobj:
            for line in fileobj:
                line = line.strip()
                if not line:
                    continue
                match = self._include.match(line)
                if match:
                    # `include <glob>`: recurse into every matching file.
                    for included in glob.glob(match.group("pattern")):
                        self._get_ld_so_conf_dirs(included, dirs)
                else:
                    dirs.add(line)
    except IOError:
        # Missing/unreadable conf files are skipped (best effort).
        pass
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def getplatformpaths(self, libname):
    """Yield every cached ld.so path recorded for `libname`."""
    if self._ld_so_cache is None:
        self._create_ld_so_cache()
    # Iterate through all found paths for the library, since we may have
    # found multiple architectures or other library types that may not load.
    for path in self._ld_so_cache.get(libname, set()):
        yield path
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def __init__(self, path):
    """Load `path` for both cdecl (via the parent) and stdcall."""
    super(WindowsLibraryLoader.Lookup, self).__init__(path)
    self.access["stdcall"] = ctypes.windll.LoadLibrary(path)
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def add_library_search_dirs(other_dirs):
    """
    Add libraries to search paths.
    If library paths are relative, convert them to absolute
    with respect to this file's directory.
    """
    for path in other_dirs:
        if not os.path.isabs(path):
            path = os.path.abspath(path)
        load_library.other_dirs.append(path)
davidjamesca/ctypesgen
[ 236, 74, 236, 23, 1426209562 ]
def parse_flow(flow_line):
    """Parse one ipfw flow line into a dict of named, typed fields.

    IPv6 lines carry an extra flowid column; lines with too few
    columns yield None (implicitly).
    """
    def _num(tok):
        # ipfw prints counters as integers; anything else becomes 0.
        return int(tok) if tok.isdigit() else 0

    tmp = flow_line.split()
    if flow_line.find(':') > 0 and len(tmp) > 8:
        # IPv6 layout
        return {
            'BKT': tmp[0],
            'Prot': tmp[1],
            'flowid': tmp[2],
            'Source': tmp[3],
            'Destination': tmp[4],
            'pkt': _num(tmp[5]),
            'bytes': _num(tmp[6]),
            'drop_pkt': _num(tmp[7]),
            'drop_bytes': _num(tmp[8]),
        }
    elif len(tmp) > 7:
        # IPv4 layout (no flowid column)
        return {
            'BKT': tmp[0],
            'Prot': tmp[1],
            'Source': tmp[2],
            'Destination': tmp[3],
            'pkt': _num(tmp[4]),
            'bytes': _num(tmp[5]),
            'drop_pkt': _num(tmp[6]),
            'drop_bytes': _num(tmp[7]),
        }
opnsense/core
[ 2303, 592, 2303, 176, 1418485430 ]
def trim_dict(payload):
    """Recursively strip surrounding whitespace from every string value
    in `payload` (mutates in place), descending into nested dicts.

    Returns the same dict for chaining convenience.
    """
    for key in payload:
        value = payload[key]
        # isinstance rather than type() ==: also covers str/dict subclasses.
        if isinstance(value, str):
            payload[key] = value.strip()
        elif isinstance(value, dict):
            trim_dict(value)
    return payload
opnsense/core
[ 2303, 592, 2303, 176, 1418485430 ]
def parse_ipfw_queues():
    """Run `ipfw queue show` and parse its output into a dict keyed by
    flow_set_nr, each entry holding the queue's parameters plus a
    'flows' list of per-flow statistics (via parse_flow).
    """
    result = dict()
    queuetxt = subprocess.run(['/sbin/ipfw', 'queue', 'show'], capture_output=True, text=True).stdout.strip()
    current_queue = None
    current_queue_header = False
    # Append a sentinel "q000000X" line so the final queue gets flushed too.
    for line in ("%s\nq000000X" % queuetxt).split('\n'):
        if len(line) == 0:
            continue
        if line[0] == 'q':
            # A new queue definition: store the previous one, start afresh.
            m = parse_flowset_params(line)
            if current_queue:
                result[current_queue['flow_set_nr']] = current_queue
            if m:
                current_queue = m.groupdict()
                current_queue['flows'] = list()
        elif line.find('__Source') > 0:
            # Column-header row separating queue params from flow rows.
            current_queue_header = True
        else:
            flow_stats = parse_flow(line)
            if flow_stats:
                current_queue['flows'].append(flow_stats)
    return trim_dict(result)
opnsense/core
[ 2303, 592, 2303, 176, 1418485430 ]
def iter_lamps(acad, objects):
    """Yield a LampEntry(num, mark, num_power) for every MText/MLeader in
    `objects` whose (unformatted) text matches the lamp pattern.
    (Python 2 code: note the ur'' literal and print statements.)
    """
    for obj in acad.iter_objects(('MText', 'MLeader'), block=objects):
        try:
            text = obj.TextString
        except Exception:
            # Not every returned object exposes TextString; skip those.
            continue
        text = utils.unformat_mtext(text)
        m = re.search(ur'(?P<num>\d+)(?P<mark>.*?)\\S(?P<num_power>.*?)/.*?;', text)
        if not m:
            continue
        print m.group('num'), m.group('mark'), m.group('num_power')
        yield LampEntry(m.group('num'), m.group('mark'), m.group('num_power'))
reclosedev/pyautocad
[ 375, 132, 375, 36, 1338625206 ]
def main():
    """Count lamps per mark across the drawing (or across a user
    selection when 'i' is passed on the command line) and print a
    summary table.  (Python 2 code.)
    """
    acad = Autocad()
    objects = None
    if 'i' in sys.argv[1:]:
        objects = acad.get_selection('Select objects')
    # mark -> total lamp count
    lamps = defaultdict(int)
    for lamp in iter_lamps(acad, objects):
        lamps[lamp.mark] += int(lamp.number)
    print '-' * 79
    for mark, number in sorted(lamps.iteritems()):
        print '%-20s | %s' % (mark, number)
reclosedev/pyautocad
[ 375, 132, 375, 36, 1338625206 ]
def __init__(self, actions, historystorage, modelstorage, delta=0.1,
             p_min=None, max_rounds=10000):
    """Set up the Exp4.P policy.

    Parameters
    ----------
    delta : float > 0
        Confidence parameter.
    p_min : float in [0, 1/k], optional
        Minimum action probability; defaults to the paper's value
        sqrt(ln(10) / (k * max_rounds)).
    max_rounds : int
        Horizon used when deriving the default p_min.

    Raises
    ------
    ValueError
        If delta or p_min has the wrong type or p_min is out of range.
    """
    super(Exp4P, self).__init__(historystorage, modelstorage, actions)
    self.n_total = 0
    # number of actions (i.e. K in the paper)
    self.n_actions = len(self._actions)
    self.max_rounds = max_rounds

    # delta > 0
    if not isinstance(delta, float):
        # BUG FIX: the message previously interpolated p_min (possibly
        # None) instead of the offending delta value.
        raise ValueError("delta should be float, the one"
                         "given is: %f" % delta)
    self.delta = delta

    # p_min in [0, 1/k]
    if p_min is None:
        self.p_min = np.sqrt(np.log(10) / self.n_actions / self.max_rounds)
    elif not isinstance(p_min, float):
        raise ValueError("p_min should be float, the one"
                         "given is: %f" % p_min)
    elif (p_min < 0) or (p_min > (1. / self.n_actions)):
        raise ValueError("p_min should be in [0, 1/k], the one"
                         "given is: %f" % p_min)
    else:
        self.p_min = p_min

    # Initialize the model storage
    model = {
        # probability distribution for action recommendation
        'action_probs': {},
        # weight vector for each expert
        'w': {},
    }
    self._modelstorage.save_model(model)
ntucllab/striatum
[ 104, 35, 104, 11, 1462947998 ]
def get_action(self, context=None, n_actions=1):
    """Return the action to perform

    Parameters
    ----------
    context : dictionary
        Contexts {expert_id: {action_id: expert_prediction}} of
        different actions.

    n_actions: int
        Number of actions wanted to recommend users.

    Returns
    -------
    history_id : int
        The history id of the action.

    action_recommendation : list of dictionaries
        In each dictionary, it will contains {Action object,
        estimated_reward, uncertainty}.
    """
    estimated_reward, uncertainty, score = self._exp4p_score(context)
    action_recommendation = []
    # Take the n_actions actions with the highest scores.
    action_recommendation_ids = sorted(score, key=score.get,
                                       reverse=True)[:n_actions]
    for action_id in action_recommendation_ids:
        action = self.get_action_with_id(action_id)
        action_recommendation.append({
            'action': action,
            'estimated_reward': estimated_reward[action_id],
            'uncertainty': uncertainty[action_id],
            'score': score[action_id],
        })
    self.n_total += 1
    # Record the recommendation with an as-yet-unknown reward.
    history_id = self._historystorage.add_history(
        context, action_recommendation, reward=None)
    return history_id, action_recommendation
ntucllab/striatum
[ 104, 35, 104, 11, 1462947998 ]
def get_max_datagram_size():
    """Maximum UDP datagram payload size, in bytes.

    TODO: currently a hard-coded placeholder value.
    """
    return 1024
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def __init__(self, port=0):
    """ctor.  A port of 0 lets the OS pick one when the server binds."""
    self.port = port
    self.buff_size = get_max_datagram_size()
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def init_server(self):
    """
    Initialize and start the server thread, if not already initialized.
    """
    if self.server is not None:
        return
    if rosgraph.network.use_ipv6():
        s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((rosgraph.network.get_bind_address(), self.port))
    if self.port == 0:
        # Port 0 asked the OS for an ephemeral port; record what we got.
        self.port = s.getsockname()[1]
    self.server = s
    # BUG FIX: the threading module has no start_new_thread (that is the
    # low-level _thread module's API); use a daemon Thread so the server
    # loop does not block interpreter shutdown.
    t = threading.Thread(target=self.run)
    t.daemon = True
    t.start()
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def shutdown(self):
    """Close the underlying socket, if one was ever opened."""
    sock = self.sock
    if sock is not None:
        sock.close()
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def supports(self, protocol):
    """
    @param protocol: name of protocol
    @type protocol: str
    @return: True if protocol is supported
    @rtype: bool
    """
    return UDPROS == protocol
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def get_supported(self):
    """
    Get supported protocols, as a list of protocol parameter lists
    (here just the UDPROS entry).
    """
    return [[UDPROS]]
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def init_publisher(self, topic_name, protocol_params): """ Initialize this node to start publishing to a new UDP location.
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def topic_connection_handler(self, sock, client_addr, header): """ Process incoming topic connection. Reads in topic name from handshake and creates the appropriate L{TCPROSPub} handler for the connection. @param sock: socket connection @type sock: socket.socket @param client_addr: client address @type client_addr: (str, int) @param header: key/value pairs from handshake header @type header: dict @return: error string or None @rtype: str """ for required in ['topic', 'md5sum', 'callerid']: if not required in header: return "Missing required '%s' field"%required else: resolved_topic_name = header['topic'] md5sum = header['md5sum'] tm = rospy.registration.get_topic_manager() topic = tm.get_publisher_impl(resolved_topic_name) if not topic: return "[%s] is not a publisher of [%s]. Topics are %s"%(rospy.names.get_caller_id(), resolved_topic_name, tm.get_publications()) elif md5sum != rospy.names.TOPIC_ANYTYPE and md5sum != topic.data_class._md5sum: actual_type = topic.data_class._type # check to see if subscriber sent 'type' header. If they did, check that # types are same first as this provides a better debugging message if 'type' in header: requested_type = header['type'] if requested_type != actual_type: return "topic types do not match: [%s] vs. 
[%s]"%(requested_type, actual_type) else: # defaults to actual type requested_type = actual_type return "Client [%s] wants topic [%s] to have datatype/md5sum [%s/%s], but our version has [%s/%s] Dropping connection."%(header['callerid'], resolved_topic_name, requested_type, md5sum, actual_type, topic.data_class._md5sum) else: #TODO:POLLING if polling header is present, have to spin up receive loop as well # #1334: tcp_nodelay support from subscriber option if 'tcp_nodelay' in header: tcp_nodelay = True if header['tcp_nodelay'].strip() == '1' else False else: tcp_nodelay = self.tcp_nodelay_map.get(resolved_topic_name, False) _configure_pub_socket(sock, tcp_nodelay) protocol = TCPROSPub(resolved_topic_name, topic.data_class, is_latch=topic.is_latch, headers=topic.headers) transport = TCPROSTransport(protocol, resolved_topic_name) transport.set_socket(sock, header['callerid']) transport.write_header() topic.add_connection(transport)
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def __init__(self, protocol, name, header):
    """
    ctor
    @param name: topic name
    @type  name: str
    @param protocol: protocol implementation
    @type  protocol: UDPROSTransportProtocol
    @param header: handshake header if transport handshake header was
    already read off of transport.
    @type  header: dict
    @throws TransportInitError: if transport cannot be initialized
    according to arguments
    """
    super(UDPROSTransport, self).__init__(protocol.direction, name=name)
    if not name:
        raise TransportInitError("Unable to initialize transport: name is not set")

    self.done = False
    self.header = header
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def send_message(self, msg, seq): """ Convenience routine for services to send a message across a particular connection. NOTE: write_data is much more efficient if same message is being sent to multiple connections. Not threadsafe. @param msg: message to send @type msg: Msg @param seq: sequence number for message @type seq: int @raise TransportException: if error occurred sending message """ # this will call write_data(), so no need to keep track of stats serialize_message(self.write_buff, seq, msg) self.write_data(self.write_buff.getvalue()) self.write_buff.truncate(0)
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def receive_once(self): """ block until messages are read off of socket @return: list of newly received messages @rtype: [Msg] @raise TransportException: if unable to receive message due to error """ pass
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def receive_loop(self, msgs_callback): pass
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def close(super): self(UDPROSTransport, self).close() #TODO self.done = True
MangoMangoDevelopment/neptune
[ 8, 4, 8, 2, 1454524069 ]
def __init__(self, connection, queue=None, exchange=None, routing_key=None, **kwargs): self.connection = connection self.backend = kwargs.get("backend", None) if not self.backend: self.backend = self.connection.create_backend() self.queue = queue or self.queue # Binding. self.queue = queue or self.queue self.exchange = exchange or self.exchange self.routing_key = routing_key or self.routing_key self.callbacks = [] # Options for opt_name in self._init_opts: opt_value = kwargs.get(opt_name) if opt_value is not None: setattr(self, opt_name, opt_value) # exclusive implies auto-delete. if self.exclusive: self.auto_delete = True self.consumer_tag = self._generate_consumer_tag() if self.auto_declare: self.declare()
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def __exit__(self, e_type, e_value, e_trace): if e_type: raise e_type(e_value) self.close()
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def _generate_consumer_tag(self): """Generate a unique consumer tag. :rtype string: """ return "%s.%s%s" % ( self.__class__.__module__, self.__class__.__name__, self._next_consumer_tag())
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def _receive_callback(self, raw_message): """Internal method used when a message is received in consume mode.""" message = self.backend.message_to_python(raw_message) if self.auto_ack and not message.acknowledged: message.ack() self.receive(message.payload, message)
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def process_next(self): """**DEPRECATED** Use :meth:`fetch` like this instead: >>> message = self.fetch(enable_callbacks=True) """ warnings.warn(DeprecationWarning( "Consumer.process_next has been deprecated in favor of \ Consumer.fetch(enable_callbacks=True)")) return self.fetch(enable_callbacks=True)
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def register_callback(self, callback): """Register a callback function to be triggered by :meth:`receive`. The ``callback`` function must take two arguments: * message_data The deserialized message data * message The :class:`carrot.backends.base.BaseMessage` instance. """ self.callbacks.append(callback)
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def discard_all(self, filterfunc=None): """Discard all waiting messages. :param filterfunc: A filter function to only discard the messages this filter returns. :returns: the number of messages discarded. *WARNING*: All incoming messages will be ignored and not processed. Example using filter: >>> def waiting_feeds_only(message): ... try: ... message_data = message.decode() ... except: # Should probably be more specific. ... pass ... ... if message_data.get("type") == "feed": ... return True ... else: ... return False """ if not filterfunc: return self.backend.queue_purge(self.queue) if self.no_ack or self.auto_ack: raise Exception("discard_all: Can't use filter with auto/no-ack.") discarded_count = 0 while True: message = self.fetch() if message is None: return discarded_count if filterfunc(message): message.ack() discarded_count += 1
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def consume(self, no_ack=None): """Declare consumer.""" no_ack = no_ack or self.no_ack self.backend.declare_consumer(queue=self.queue, no_ack=no_ack, callback=self._receive_callback, consumer_tag=self.consumer_tag, nowait=True) self.channel_open = True
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def iterqueue(self, limit=None, infinite=False): """Infinite iterator yielding pending messages, by using synchronous direct access to the queue (``basic_get``). :meth:`iterqueue` is used where synchronous functionality is more important than performance. If you can, use :meth:`iterconsume` instead. :keyword limit: If set, the iterator stops when it has processed this number of messages in total. :keyword infinite: Don't raise :exc:`StopIteration` if there is no messages waiting, but return ``None`` instead. If infinite you obviously shouldn't consume the whole iterator at once without using a ``limit``. :raises StopIteration: If there is no messages waiting, and the iterator is not infinite. """ for items_since_start in count(): item = self.fetch() if (not infinite and item is None) or \ (limit and items_since_start >= limit): raise StopIteration yield item
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def close(self): """Close the channel to the queue.""" self.cancel() self.backend.close() self._closed = True
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): """Request specific Quality of Service. This method requests a specific quality of service. The QoS can be specified for the current channel or for all channels on the connection. The particular properties and semantics of a qos method always depend on the content class semantics. Though the qos method could in principle apply to both peers, it is currently meaningful only for the server. :param prefetch_size: Prefetch window in octets. The client can request that messages be sent in advance so that when the client finishes processing a message, the following message is already held locally, rather than needing to be sent down the channel. Prefetching gives a performance improvement. This field specifies the prefetch window size in octets. The server will send a message in advance if it is equal to or smaller in size than the available prefetch size (and also falls into other prefetch limits). May be set to zero, meaning "no specific limit", although other prefetch limits may still apply. The ``prefetch_size`` is ignored if the :attr:`no_ack` option is set. :param prefetch_count: Specifies a prefetch window in terms of whole messages. This field may be used in combination with ``prefetch_size``; A message will only be sent in advance if both prefetch windows (and those at the channel and connection level) allow it. The prefetch- count is ignored if the :attr:`no_ack` option is set. :keyword apply_global: By default the QoS settings apply to the current channel only. If this is set, they are applied to the entire connection. """ return self.backend.qos(prefetch_size, prefetch_count, apply_global)
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def __init__(self, connection, exchange=None, routing_key=None, **kwargs): self.connection = connection self.backend = self.connection.create_backend() self.exchange = exchange or self.exchange self.routing_key = routing_key or self.routing_key for opt_name in self._init_opts: opt_value = kwargs.get(opt_name) if opt_value is not None: setattr(self, opt_name, opt_value) self.delivery_mode = self.DELIVERY_MODES.get(self.delivery_mode, self.delivery_mode) self._closed = False if self.auto_declare and self.exchange: self.declare()
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def __enter__(self): return self
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def create_message(self, message_data, delivery_mode=None, priority=None, content_type=None, content_encoding=None, serializer=None): """With any data, serialize it and encapsulate it in a AMQP message with the proper headers set.""" delivery_mode = delivery_mode or self.delivery_mode # No content_type? Then we're serializing the data internally. if not content_type: serializer = serializer or self.serializer (content_type, content_encoding, message_data) = serialization.encode(message_data, serializer=serializer) else: # If the programmer doesn't want us to serialize, # make sure content_encoding is set. if isinstance(message_data, unicode): if not content_encoding: content_encoding = 'utf-8' message_data = message_data.encode(content_encoding) # If they passed in a string, we can't know anything # about it. So assume it's binary data. elif not content_encoding: content_encoding = 'binary' return self.backend.prepare_message(message_data, delivery_mode, priority=priority, content_type=content_type, content_encoding=content_encoding)
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def close(self): """Close connection to queue.""" self.backend.close() self._closed = True
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def __init__(self, connection, **kwargs): self.connection = connection self.exchange = kwargs.get("exchange", self.exchange) self.queue = kwargs.get("queue", self.queue) self.routing_key = kwargs.get("routing_key", self.routing_key) self.publisher = self.publisher_cls(connection, exchange=self.exchange, routing_key=self.routing_key) self.consumer = self.consumer_cls(connection, queue=self.queue, exchange=self.exchange, routing_key=self.routing_key) self.consumer.register_callback(self.receive) self.callbacks = [] self._closed = False
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def __exit__(self, e_type, e_value, e_trace): if e_type: raise e_type(e_value) self.close()
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def receive(self, message_data, message): """See :meth:`Consumer.receive`""" if not self.callbacks: raise NotImplementedError("No consumer callbacks registered") for callback in self.callbacks: callback(message_data, message)
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def fetch(self, **kwargs): """See :meth:`Consumer.fetch`""" return self.consumer.fetch(**kwargs)
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def __init__(self, connection, from_dict=None, consumers=None, callbacks=None, **options): self.connection = connection self.options = options self.from_dict = from_dict or {} self.consumers = [] self.callbacks = callbacks or [] self._open_consumers = {} self.backend = self.connection.create_backend() self.auto_ack = options.get("auto_ack", self.auto_ack) if consumers: [self.add_consumer(consumer) for consumer in consumers] [self.add_consumer_from_dict(queue_name, **queue_options) for queue_name, queue_options in self.from_dict.items()]
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def add_consumer_from_dict(self, queue, **options): """Add another consumer from dictionary configuration.""" options.setdefault("routing_key", options.pop("binding_key", None)) consumer = Consumer(self.connection, queue=queue, backend=self.backend, **options) self.consumers.append(consumer) return consumer
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]
def register_callback(self, callback): """Register new callback to be called when a message is received. See :meth:`Consumer.register_callback`""" self.callbacks.append(callback)
ask/carrot
[ 196, 34, 196, 12, 1237911887 ]