def _assert_obj_type(pub, name="pub", obj_type=DBPublication):
    if not isinstance(pub, obj_type):
        raise InvalidType(
            "`%s` has to be an instance of %s, not %s!" % (
                name, obj_type.__name__, pub.__class__.__name__
            )
        )
Make sure that `pub` is an instance of `obj_type`.

Args:
    pub (obj): Instance which will be checked.
    name (str): Name of the instance. Used in exceptions. Default `pub`.
    obj_type (class): Class of which `pub` should be an instance. Default :class:`.DBPublication`.

Raises:
    InvalidType: When `pub` is not an instance of `obj_type`.
def save(self, *args, **kwargs):
    if not self.pk:
        old_votes = Vote.objects.filter(user=self.user, node=self.node)
        for old_vote in old_votes:
            old_vote.delete()
    super(Vote, self).save(*args, **kwargs)
Ensure users cannot vote on the same node multiple times, but let users change their votes.
def from_session(cls, session):
    session.error_wrapper = lambda e: NvimError(e[1])
    channel_id, metadata = session.request(b'vim_get_api_info')
    if IS_PYTHON3:
        metadata = walk(decode_if_bytes, metadata)
    types = {
        metadata['types']['Buffer']['id']: Buffer,
        metadata['types']['Window']['id']: Window,
        metadata['types']['Tabpage']['id']: Tabpage,
    }
    return cls(session, channel_id, metadata, types)
Create a new Nvim instance for a Session instance. This method must be called to create the first Nvim instance, since it queries Nvim metadata for type information and sets a SessionHook for creating specialized objects from Nvim remote handles.
def authenticate(self, username, password):
    self._username = username
    self._password = password
    self.disconnect()
    self._open_connection()
    return self.authenticated
Authenticate user on server.

:param username: Username used for authentication.
:type username: six.string_types
:param password: Password used for authentication.
:type password: six.string_types
:return: True if successful.
:raises: InvalidCredentials, AuthenticationNotSupported, MemcachedException
:rtype: bool
def order_percent(self, asset, percent, limit_price=None, stop_price=None, style=None):
    if not self._can_order_asset(asset):
        return None

    amount = self._calculate_order_percent_amount(asset, percent)
    return self.order(asset, amount,
                      limit_price=limit_price,
                      stop_price=stop_price,
                      style=style)
Place an order in the specified asset corresponding to the given percent of the current portfolio value.

Parameters
----------
asset : Asset
    The asset that this order is for.
percent : float
    The percentage of the portfolio value to allocate to ``asset``. This is specified as a decimal, for example: 0.50 means 50%.
limit_price : float, optional
    The limit price for the order.
stop_price : float, optional
    The stop price for the order.
style : ExecutionStyle
    The execution style for the order.

Returns
-------
order_id : str
    The unique identifier for this order.

Notes
-----
See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style``.

See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_value`
def fetch(self):
    params = values.of({})

    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )

    return AddOnResultInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        reference_sid=self._solution['reference_sid'],
        sid=self._solution['sid'],
    )
Fetch an AddOnResultInstance.

:returns: Fetched AddOnResultInstance
:rtype: twilio.rest.api.v2010.account.recording.add_on_result.AddOnResultInstance
def DbGetAttributeAliasList(self, argin):
    self._log.debug("In DbGetAttributeAliasList()")
    if not argin:
        argin = "%"
    else:
        argin = replace_wildcard(argin)
    return self.db.get_attribute_alias_list(argin)
Get attribute alias list for a specified filter.

:param argin: attribute alias filter string (e.g. att*)
:type: tango.DevString
:return: attribute aliases
:rtype: tango.DevVarStringArray
def get_axis(self, undefined=np.zeros(3)):
    tolerance = 1e-17
    self._normalise()
    norm = np.linalg.norm(self.vector)
    if norm < tolerance:
        return undefined
    else:
        return self.vector / norm
Get the axis or vector about which the quaternion rotation occurs.

For a null rotation (a purely real quaternion), the rotation angle will always be `0`, but the rotation axis is undefined. It is by default assumed to be `[0, 0, 0]`.

Params:
    undefined: [optional] specify the axis vector that should define a null rotation. This is geometrically meaningless, and could be any of an infinite set of vectors, but can be specified if the default (`[0, 0, 0]`) causes undesired behaviour.

Returns:
    A Numpy unit 3-vector describing the Quaternion object's axis of rotation.

Note:
    This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
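A minimal usage sketch, assuming this `get_axis` belongs to pyquaternion's Quaternion class (the constructor call below follows that library's API and is an assumption here):

    import numpy as np
    from pyquaternion import Quaternion

    q = Quaternion(axis=[0.0, 0.0, 1.0], angle=np.pi / 2)  # 90-degree rotation about z
    q.get_axis()             # -> array([0., 0., 1.])
    Quaternion().get_axis()  # null rotation -> array([0., 0., 0.]) by default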
def spliceext(filepath, s):
    root, ext = os.path.splitext(safepath(filepath))
    return root + s + ext
Add s into filepath before the extension.

Args:
    filepath (str, path): file path
    s (str): string to splice

Returns:
    str
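For illustration, a short usage sketch (assuming `safepath` merely normalizes the path):

    spliceext('/tmp/report.csv', '_backup')  # -> '/tmp/report_backup.csv'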
def create_package_node(self, team, user, package, dry_run=False):
    contents = RootNode(dict())
    if dry_run:
        return contents

    self.check_name(team, user, package)
    assert contents is not None

    self.create_dirs()
    path = self.package_path(team, user, package)
    try:
        os.remove(path)
    except OSError:
        pass

    return contents
Creates a new package and initializes its contents. See `install_package`.
def p_flatten(self, obj, **kwargs):
    if isinstance(obj, six.string_types):
        return obj
    result = ""
    for i in obj:
        result += self.p_flatten(i)
    return result
Flatten a list of lists of lists... of strings into a string.

This is usually used as the action for sequence expressions:

.. code-block::

    my_rule <- 'a' . 'c' {p_flatten}

With the input "abc" and no action, this rule returns ['a', 'b', 'c']. `{p_flatten}` produces "abc".

>>> parser.p_flatten(['a', ['b', 'c']])
'abc'
def CreateJarBuilder(env):
    try:
        java_jar = env['BUILDERS']['JarFile']
    except KeyError:
        fs = SCons.Node.FS.get_default_fs()
        jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
        java_jar = SCons.Builder.Builder(action=jar_com,
                                         suffix='$JARSUFFIX',
                                         src_suffix='$JAVACLASSSUFFIX',
                                         src_builder='JavaClassFile',
                                         source_factory=fs.Entry)
        env['BUILDERS']['JarFile'] = java_jar
    return java_jar
The Jar builder expects a list of class files which it can package into a jar file. The jar tool provides an interface for passing other types of Java files, such as .java files, directories, or SWIG interfaces, and will build them into class files which it can then package into the jar.
def save_xml(self, doc, element):
    for cond in self._targets:
        new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'targets')
        new_element.setAttributeNS(XSI_NS, XSI_NS_S + 'type', 'rtsExt:condition_ext')
        cond.save_xml(doc, new_element)
        element.appendChild(new_element)
Save this message_sending object into an xml.dom.Element object.
def find_path_with_profiles(self, conversion_profiles, in_, out):
    original_profiles = dict(self.conversion_profiles)
    self._setup_profiles(conversion_profiles)
    results = self.find_path(in_, out)
    self.conversion_profiles = original_profiles
    return results
Like find_path, except forces the conversion profiles to be the given conversion profile setting. Useful for "temporarily overriding" the global conversion profiles with your own.
def credit_card_owner(self, gender: Optional[Gender] = None) -> dict:
    owner = {
        'credit_card': self.credit_card_number(),
        'expiration_date': self.credit_card_expiration_date(),
        'owner': self.__person.full_name(gender=gender).upper(),
    }
    return owner
Generate credit card owner.

:param gender: Gender of credit card owner.
:type gender: Gender's enum object.
:return: Dict of credit card owner.
def bulk_activate(workers, lbn, profile='default'):
    ret = {}

    if isinstance(workers, six.string_types):
        workers = workers.split(',')

    for worker in workers:
        try:
            ret[worker] = worker_activate(worker, lbn, profile)
        except Exception:
            ret[worker] = False

    return ret
Activate all the given workers in the specific load balancer.

CLI Examples:

.. code-block:: bash

    salt '*' modjk.bulk_activate node1,node2,node3 loadbalancer1
    salt '*' modjk.bulk_activate node1,node2,node3 loadbalancer1 other-profile
    salt '*' modjk.bulk_activate ["node1","node2","node3"] loadbalancer1
    salt '*' modjk.bulk_activate ["node1","node2","node3"] loadbalancer1 other-profile
def send(self, request, socket, context, *args):
    for handler, pattern in self.handlers:
        no_channel = not pattern and not socket.channels
        if self.name.endswith("subscribe") and pattern:
            matches = [pattern.match(args[0])]
        else:
            matches = [pattern.match(c) for c in socket.channels if pattern]
        # Note: filter() returns a list under Python 2; under Python 3 the
        # equivalent truthiness check would be any(matches).
        if no_channel or filter(None, matches):
            handler(request, socket, context, *args)
When an event is sent, run all relevant handlers. Relevant handlers are those without a channel pattern when the given socket is not subscribed to any particular channel, or the handlers with a channel pattern that matches any of the channels that the given socket is subscribed to. In the case of subscribe/unsubscribe, match the channel arg being sent to the channel pattern.
def commentless(data):
    it = iter(data)
    try:
        while True:
            line = next(it)
            while ":" in line or not line.lstrip().startswith(".."):
                yield line
                line = next(it)
            indent = indent_size(line)
            it = itertools.dropwhile(
                lambda el: indent_size(el) > indent or not el.strip(), it)
    except StopIteration:
        # Exhausting the iterator simply ends the generator (PEP 479).
        return
Generator that removes double-dot reStructuredText comments and their contents from a list of strings, based on indentation, removing trailing empty lines after each comment as well.
def update_db(self, giver, receiverkarma):
    for receiver in receiverkarma:
        if receiver != giver:
            urow = KarmaStatsTable(
                ude(giver), ude(receiver), receiverkarma[receiver])
            self.db.session.add(urow)
    self.db.session.commit()
Record the giver of karma, the receiver of karma, and the karma amount. Typically the count will be 1, but it can be any positive or negative integer.
def send_output(self, value, stdout):
    writer = self.writer
    if value is not None:
        writer.write('{!r}\n'.format(value).encode('utf8'))
    if stdout:
        writer.write(stdout.encode('utf8'))
    yield from writer.drain()
Write the output or value of the expression back to user. >>> 5 5 >>> print('cash rules everything around me') cash rules everything around me
def wait_for(self, pattern, timeout=None):
    should_continue = True
    if self.block:
        raise TypeError(NON_BLOCKING_ERROR_MESSAGE)

    def stop(signum, frame):
        nonlocal should_continue
        if should_continue:
            raise TimeoutError()

    if timeout:
        signal.signal(signal.SIGALRM, stop)
        signal.alarm(timeout)

    while should_continue:
        output = self.poll_output() + self.poll_error()
        filtered = [line for line in output if re.match(pattern, line)]
        if filtered:
            should_continue = False
Block until a pattern has been found in stdout and stderr.

Args:
    pattern (:class:`~re.Pattern`): The pattern to search for.
    timeout (int): Maximum number of seconds to wait. If None, wait indefinitely.

Raises:
    TimeoutError: When the timeout is reached.
def stacked_node_layout(self, EdgeAttribute=None, network=None, NodeAttribute=None,
                        nodeList=None, x_position=None, y_start_position=None, verbose=None):
    network = check_network(self, network, verbose=verbose)
    PARAMS = set_param(['EdgeAttribute', 'network', 'NodeAttribute', 'nodeList',
                        'x_position', 'y_start_position'],
                       [EdgeAttribute, network, NodeAttribute,
                        nodeList, x_position, y_start_position])
    response = api(url=self.__url + "/stacked-node-layout", PARAMS=PARAMS,
                   method="POST", verbose=verbose)
    return response
Execute the Stacked Node Layout on a network.

:param EdgeAttribute (string, optional): The name of the edge column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown.
:param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value, can also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column containing numeric values that will be used as weights in the layout algorithm. Only columns containing numeric values are shown.
:param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values.
:param x_position (string, optional): X start position, as a numeric value.
:param y_start_position (string, optional): Y start position, as a numeric value.
def _is_skippable(filename_full):
    if not Settings.follow_symlinks and os.path.islink(filename_full):
        return True
    if os.path.basename(filename_full) == timestamp.RECORD_FILENAME:
        return True
    if not os.path.exists(filename_full):
        if Settings.verbose:
            print(filename_full, 'was not found.')
        return True
    return False
Handle things that are not optimizable files.
def handle_comment(self, comment):
    match = _COND_COMMENT_PATTERN.match(comment)
    if match is not None:
        cond = match.group(1)
        content = match.group(2)
        self._buffer.append(_COND_COMMENT_START_FORMAT % cond)
        self._push_status()
        self.feed(content)
        self._pop_status()
        self._buffer.append(_COND_COMMENT_END_FORMAT)
    elif not self.remove_comments:
        self._buffer.append(_COMMENT_FORMAT % comment)
Remove comments, except IE conditional comments.

.. seealso::
    `About conditional comments <http://msdn.microsoft.com/en-us/library/ms537512.ASPX>`_
def installed(name, default=False, user=None):
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    if name.startswith('python-'):
        name = re.sub(r'^python-', '', name)

    if __opts__['test']:
        ret['comment'] = 'python {0} is set to be installed'.format(name)
        return ret

    ret = _check_pyenv(ret, user)
    if ret['result'] is False:
        if not __salt__['pyenv.install'](user):
            ret['comment'] = 'pyenv failed to install'
            return ret
        else:
            return _check_and_install_python(ret, name, default, user=user)
    else:
        return _check_and_install_python(ret, name, default, user=user)
Verify that the specified python is installed with pyenv. pyenv is installed if necessary.

name
    The version of python to install

default : False
    Whether to make this python the default.

user: None
    The user to run pyenv as.

    .. versionadded:: 0.17.0

.. versionadded:: 0.16.0
def download_from_search(query_str, folder, do_extract_text=True, max_results=None):
    piis = get_piis(query_str)
    for pii in piis[:max_results]:
        if os.path.exists(os.path.join(folder, '%s.txt' % pii)):
            continue
        logger.info('Downloading %s' % pii)
        xml = download_article(pii, 'pii')
        sleep(1)
        if do_extract_text:
            txt = extract_text(xml)
            if not txt:
                continue
            with open(os.path.join(folder, '%s.txt' % pii), 'wb') as fh:
                fh.write(txt.encode('utf-8'))
        else:
            with open(os.path.join(folder, '%s.xml' % pii), 'wb') as fh:
                fh.write(xml.encode('utf-8'))
    return
Save raw text files based on a search for papers on ScienceDirect.

This performs a search to get PIIs, downloads the XML corresponding to the PII, extracts the raw text and then saves the text into a file in the designated folder.

Parameters
----------
query_str : str
    The query string to search with.
folder : str
    The local path to an existing folder in which the text files will be dumped.
do_extract_text : bool
    Choose whether to extract text from the xml, or simply save the raw xml files. Default is True, so text is extracted.
max_results : int or None
    Default is None. If specified, limit the number of results to the given maximum.
def get_aa_letter(aa_code):
    aa_letter = 'X'
    for key, val in standard_amino_acids.items():
        if val == aa_code:
            aa_letter = key
    return aa_letter
Get one-letter version of aa_code if possible. If not, return 'X'.

Parameters
----------
aa_code : str
    Three-letter amino acid code.

Returns
-------
aa_letter : str
    One-letter aa code. Default value is 'X'.
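For illustration, a usage sketch assuming `standard_amino_acids` maps one-letter keys to three-letter codes (e.g. {'A': 'ALA', ...}):

    get_aa_letter('ALA')  # -> 'A'
    get_aa_letter('XYZ')  # unknown code -> 'X'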
def center(self, X):
    X = X.copy()
    inan = numpy.isnan(X)
    if self.mu is None:
        X_ = numpy.ma.masked_array(X, inan)
        self.mu = X_.mean(0).base
        self.sigma = X_.std(0).base
    # Impute NaN entries with the column means before centering.
    reduce(lambda y, x: setitem(x[0], x[1], x[2]),
           zip(X.T, inan.T, self.mu), None)
    X = X - self.mu
    X = X / numpy.where(self.sigma == 0, 1e-30, self.sigma)
    return X
Center `X` in PCA space.
def resolutions(self):
    r_json = self._get_json('resolution')
    resolutions = [Resolution(self._options, self._session, raw_res_json)
                   for raw_res_json in r_json]
    return resolutions
Get a list of resolution Resources from the server. :rtype: List[Resolution]
def write_info(self, w):
    w.write_b_varchar("")  # database name is sent as an empty string
    w.write_b_varchar(self._table_type.typ_schema)
    w.write_b_varchar(self._table_type.typ_name)
Writes TVP_TYPENAME structure.

Spec: https://msdn.microsoft.com/en-us/library/dd302994.aspx

@param w: TdsWriter
@return:
def display(self):
    pygame.init()
    # Note: this assignment shadows the display() method on the instance
    # after the first call.
    self.display = pygame.display.set_mode((self.width, self.height))
    self.display.blit(self.cloud, (0, 0))
    pygame.display.update()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return
Displays the word cloud to the screen.
def mode(self, target, mode_string=None, tags=None):
    params = [target]
    if mode_string:
        params += mode_string
    self.send('MODE', params=params, source=self.nick, tags=tags)
Sends new modes to or requests existing modes from the given target.
def is_namespace_valid(namespace_id):
    if not is_b40(namespace_id) or "+" in namespace_id or namespace_id.count(".") > 0:
        return False
    if len(namespace_id) == 0 or len(namespace_id) > LENGTHS['blockchain_id_namespace_id']:
        return False
    return True
Is a namespace ID valid?

>>> is_namespace_valid('abcd')
True
>>> is_namespace_valid('+abcd')
False
>>> is_namespace_valid('abc.def')
False
>>> is_namespace_valid('.abcd')
False
>>> is_namespace_valid('abcdabcdabcdabcdabcd')
False
>>> is_namespace_valid('abcdabcdabcdabcdabc')
True
def deg2fmt(ra_deg, dec_deg, format):
    rhr, rmn, rsec = degToHms(ra_deg)
    dsgn, ddeg, dmn, dsec = degToDms(dec_deg)

    if format == 'hms':
        return rhr, rmn, rsec, dsgn, ddeg, dmn, dsec
    elif format == 'str':
        ra_txt = '%d:%02d:%06.3f' % (rhr, rmn, rsec)
        if dsgn < 0:
            dsgn = '-'
        else:
            dsgn = '+'
        dec_txt = '%s%d:%02d:%05.2f' % (dsgn, ddeg, dmn, dsec)
        return ra_txt, dec_txt
Format coordinates.
def register_api_doc_endpoints(config, endpoints, base_path='/api-docs'):
    for endpoint in endpoints:
        path = base_path.rstrip('/') + endpoint.path
        config.add_route(endpoint.route_name, path)
        config.add_view(
            endpoint.view,
            route_name=endpoint.route_name,
            renderer=endpoint.renderer)
Create and register pyramid endpoints to service swagger api docs.

Routes and views will be registered on the `config` at `path`.

:param config: a pyramid configuration to register the new views and routes
:type config: :class:`pyramid.config.Configurator`
:param endpoints: a list of endpoints to register as routes and views
:type endpoints: a list of :class:`pyramid_swagger.model.PyramidEndpoint`
:param base_path: the base path used to register api doc endpoints. Defaults to `/api-docs`.
:type base_path: string
def get(self, recipe=None, plugin=None):
    if plugin is not None:
        if recipe is None:
            recipes_list = {}
            for key in self.recipes.keys():
                if self.recipes[key].plugin == plugin:
                    recipes_list[key] = self.recipes[key]
            return recipes_list
        else:
            if recipe in self.recipes.keys():
                if self.recipes[recipe].plugin == plugin:
                    return self.recipes[recipe]
                else:
                    return None
            else:
                return None
    else:
        if recipe is None:
            return self.recipes
        else:
            if recipe in self.recipes.keys():
                return self.recipes[recipe]
            else:
                return None
Get one or more recipes.

:param recipe: Name of the recipe
:type recipe: str
:param plugin: Plugin object, under which the recipe was registered
:type plugin: GwBasePattern
def drawBezier(page, p1, p2, p3, p4, color=None, fill=None, dashes=None,
               width=1, morph=None, closePath=False, roundCap=False, overlay=True):
    img = page.newShape()
    Q = img.drawBezier(Point(p1), Point(p2), Point(p3), Point(p4))
    img.finish(color=color, fill=fill, dashes=dashes, width=width,
               roundCap=roundCap, morph=morph, closePath=closePath)
    img.commit(overlay)
    return Q
Draw a general cubic Bezier curve from p1 to p4 using control points p2 and p3.
def read_git_commit_timestamp(repo_path=None):
    repo = git.repo.base.Repo(path=repo_path, search_parent_directories=True)
    head_commit = repo.head.commit
    return head_commit.committed_datetime
Obtain the timestamp from the current head commit of a Git repository.

Parameters
----------
repo_path : `str`, optional
    Path to the Git repository. Leave as `None` to use the current working directory.

Returns
-------
commit_timestamp : `datetime.datetime`
    The datetime of the head commit.
def _get_meta(self, row, col):
    if self.meta is None:
        logging.error("unable to get meta: empty section")
        return {}
    if row not in self._get_row_hdrs() or \
       col not in self._get_col_hdrs():
        logging.error("unable to get meta: cell [%s,%s] does not exist"
                      % (row, col))
        return {}
    meta_str = self.meta[col][self.irt[row]]
    try:
        meta = ast.literal_eval(meta_str)
        if isinstance(meta, dict):
            return meta
    except (SyntaxError, ValueError) as e:
        logging.error("unable to parse meta string - %s: %s"
                      % (meta_str, e))
    return {}
Get metadata for a particular cell
def extract_lzma(path):
    tlfile = pathlib.Path(path)
    with tlfile.open("rb") as td:
        data = lzma.decompress(td.read())
    fd, tmpname = tempfile.mkstemp(prefix="odt_ex_", suffix=".tar")
    with open(fd, "wb") as fo:
        fo.write(data)
    return tmpname
Extract an lzma file and return the temporary file name
def in_same_box(self, a, b):
    assert a in self.micro_indices
    assert b in self.micro_indices
    for part in self.partition:
        if a in part and b in part:
            return True
    return False
Return ``True`` if nodes ``a`` and ``b`` are in the same box.
def connect_mysql(host, port, user, password, database):
    return pymysql.connect(
        host=host,
        port=port,
        user=user,
        passwd=password,
        db=database
    )
Connect to MySQL with retries.
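The body above contains no retry loop itself, so retries are presumably layered on elsewhere. A minimal sketch of such a wrapper, with the attempt count and delay as assumed parameters:

    import time
    import pymysql

    def connect_mysql_retrying(host, port, user, password, database,
                               attempts=3, delay=1.0):
        # Hypothetical helper: retry pymysql.connect a few times before giving up.
        last_error = None
        for _ in range(attempts):
            try:
                return pymysql.connect(host=host, port=port, user=user,
                                       passwd=password, db=database)
            except pymysql.err.OperationalError as exc:  # connection-level failure
                last_error = exc
                time.sleep(delay)
        raise last_error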
def as_categorical(self):
    if len(self.shape) > 1:
        raise ValueError("Can't convert a 2D array to a categorical.")

    with ignore_pandas_nan_categorical_warning():
        return pd.Categorical.from_codes(
            self.as_int_array(),
            self.categories.copy(),
            ordered=False,
        )
Coerce self into a pandas categorical. This is only defined on 1D arrays, since that's all pandas supports.
def _identify_os(self, msg):
    ret = []
    for dev_os, data in self.compiled_prefixes.items():
        log.debug('Matching under %s', dev_os)
        msg_dict = self._identify_prefix(msg, data)
        if msg_dict:
            log.debug('Adding %s to list of matched OS', dev_os)
            ret.append((dev_os, msg_dict))
        else:
            log.debug('No match found for %s', dev_os)
    if not ret:
        log.debug('Not matched any OS, returning original log')
        msg_dict = {'message': msg}
        ret.append((None, msg_dict))
    return ret
Using the prefix of the syslog message, we are able to identify the operating system and then continue parsing.
def as_dict(self):
    data = super(BaseEmail, self).as_dict()
    data["Headers"] = [{"Name": name, "Value": value}
                       for name, value in data["Headers"].items()]
    for field in ("To", "Cc", "Bcc"):
        if field in data:
            data[field] = list_to_csv(data[field])
    data["Attachments"] = [prepare_attachments(attachment)
                           for attachment in data["Attachments"]]
    return data
Additionally encodes headers.

:return: Dict representation of the email.
def _to_linear(M, N, L, q):
    "Converts a qubit in chimera coordinates to its linear index."
    (x, y, u, k) = q
    return 2 * L * N * x + 2 * L * y + L * u + k
Converts a qubit in chimera coordinates to its linear index.
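As a worked example (values chosen for illustration): in a Chimera graph with N = 2 columns and shore size L = 4, the qubit at coordinates (x, y, u, k) = (1, 0, 1, 2) maps to 2*4*2*1 + 2*4*0 + 4*1 + 2 = 22. Note that M, the row count, does not enter the formula:

    _to_linear(2, 2, 4, (1, 0, 1, 2))  # -> 22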
def calculateRange(self):
    if not self.autoRangeCti or not self.autoRangeCti.configValue:
        return (self.rangeMinCti.data, self.rangeMaxCti.data)
    else:
        rangeFunction = self._rangeFunctions[self.autoRangeMethod]
        return rangeFunction()
Calculates the range depending on the config settings.
def maybe_coroutine(decide):
    def _maybe_coroutine(f):
        @functools.wraps(f)
        def __maybe_coroutine(*args, **kwargs):
            if decide(*args, **kwargs):
                return coroutine(f)(*args, **kwargs)
            else:
                return no_coroutine(f)(*args, **kwargs)
        return __maybe_coroutine
    return _maybe_coroutine
Either be a coroutine or not.

Use as a decorator:

    @maybe_coroutine(lambda maybeAPromise: isinstance(maybeAPromise, Promise))
    def foo(maybeAPromise):
        result = yield maybeAPromise
        print("hello")
        return result

The function passed should be a generator yielding either only Promises or whatever you feel like. The decide parameter must be a function which gets called with the same parameters as the decorated function, to decide whether this is a coroutine or not. Using this it is possible to either make the function a coroutine or not based on a parameter to the function call.

Let's explain the example above:

    # If maybeAPromise is an instance of Promise,
    # we want the foo function to act as a coroutine.
    # If maybeAPromise is not an instance of Promise,
    # we want the foo function to act like any other normal synchronous function.
    @maybe_coroutine(lambda maybeAPromise: isinstance(maybeAPromise, Promise))
    def foo(maybeAPromise):
        # If isinstance(maybeAPromise, Promise), foo behaves like a coroutine,
        # thus maybeAPromise will get resolved asynchronously and the result will be
        # pushed back here.
        # Otherwise, foo behaves like no_coroutine,
        # just pushing the exact value of maybeAPromise back into the generator.
        result = yield maybeAPromise
        print("hello")
        return result
def reset(self):
    if self.__row_number > self.__sample_size:
        self.__parser.reset()
        self.__extract_sample()
        self.__extract_headers()
    self.__row_number = 0
Resets the stream pointer to the beginning of the file.
def choose(msg, items, attr):
    if len(items) == 1:
        return items[0]
    print()
    for index, i in enumerate(items):
        name = attr(i) if callable(attr) else getattr(i, attr)
        print(' %s: %s' % (index, name))
    print()
    while True:
        try:
            inp = input('%s: ' % msg)
            if any(s in inp for s in (':', '::', '-')):
                idx = slice(*map(lambda x: int(x.strip()) if x.strip() else None,
                                 inp.split(':')))
                return items[idx]
            else:
                return items[int(inp)]
        except (ValueError, IndexError):
            pass
Command line helper to display a list of choices, asking the user to choose one of the options.
def ds_geom(ds, t_srs=None):
    gt = ds.GetGeoTransform()
    ds_srs = get_ds_srs(ds)
    if t_srs is None:
        t_srs = ds_srs
    ns = ds.RasterXSize
    nl = ds.RasterYSize
    x = np.array([0, ns, ns, 0, 0], dtype=float)
    y = np.array([0, 0, nl, nl, 0], dtype=float)
    x -= 0.5
    y -= 0.5
    mx, my = pixelToMap(x, y, gt)
    geom_wkt = 'POLYGON(({0}))'.format(
        ', '.join(['{0} {1}'.format(*a) for a in zip(mx, my)]))
    geom = ogr.CreateGeometryFromWkt(geom_wkt)
    geom.AssignSpatialReference(ds_srs)
    if not ds_srs.IsSame(t_srs):
        geom_transform(geom, t_srs)
    return geom
Return dataset bbox envelope as geom
def get_result(self, *, block=False, timeout=None):
    return self.messages[-1].get_result(block=block, timeout=timeout)
Get the result of this pipeline.

Pipeline results are represented by the result of the last message in the chain.

Parameters:
    block(bool): Whether or not to block until a result is set.
    timeout(int): The maximum amount of time, in ms, to wait for a result when block is True. Defaults to 10 seconds.

Raises:
    ResultMissing: When block is False and the result isn't set.
    ResultTimeout: When waiting for a result times out.

Returns:
    object: The result.
def _endReq(self, key, result=None, success=True):
    future = self._futures.pop(key, None)
    self._reqId2Contract.pop(key, None)
    if future:
        if result is None:
            result = self._results.pop(key, [])
        if not future.done():
            if success:
                future.set_result(result)
            else:
                future.set_exception(result)
Finish the future of the corresponding key with the given result. If no result is given then it will be popped off the general results.
def reorder(args):
    import csv
    p = OptionParser(reorder.__doc__)
    p.set_sep()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    tabfile, order = args
    sep = opts.sep
    order = [int(x) - 1 for x in order.split(",")]
    reader = csv.reader(must_open(tabfile), delimiter=sep)
    writer = csv.writer(sys.stdout, delimiter=sep)
    for row in reader:
        newrow = [row[x] for x in order]
        writer.writerow(newrow)
%prog reorder tabfile 1,2,4,3 > newtabfile

Reorder columns in tab-delimited files. The above syntax will print out a new file with col-1,2,4,3 from the old file.
def delete(self, obj):
    obj = self.api.get_object(getattr(obj, 'id', obj))
    obj.delete()
    self.remove(obj.id)
Delete an object in CDSTAR and remove it from the catalog.

:param obj: An object ID or an Object instance.
def call(self, callname, arguments=None):
    action = getattr(self.api, callname, None)
    if action is None:
        try:
            action = self.api.ENDPOINT_OVERRIDES.get(callname, None)
        except AttributeError:
            action = callname

    if not callable(action):
        request = self._generate_request(action, arguments)
        if action is None:
            return self._generate_result(
                callname, self.api.call(*call_args(callname, arguments)))
        return self._generate_result(
            callname, self.api.call(*call_args(action, arguments)))

    request = self._generate_request(callname, arguments)
    return self._generate_result(callname, action(request))
Executed on each scheduled iteration
def metrics(self, raw=False):
    if raw:
        return self._metrics.metrics.copy()

    metrics = {}
    for k, v in six.iteritems(self._metrics.metrics.copy()):
        if k.group not in metrics:
            metrics[k.group] = {}
        if k.name not in metrics[k.group]:
            metrics[k.group][k.name] = {}
        metrics[k.group][k.name] = v.value()
    return metrics
Get metrics on producer performance.

This is ported from the Java Producer, for details see:
https://kafka.apache.org/documentation/#producer_monitoring

Warning:
    This is an unstable interface. It may change in future releases without warning.
def map_size(self, key):
    rv = self.get(key)
    return len(rv.value)
Get the number of items in the map.

:param str key: The document ID of the map
:return int: The number of items in the map
:raise: :cb_exc:`NotFoundError` if the document does not exist.

.. seealso:: :meth:`map_add`
def serialize(data):
    return rapidjson.dumps(data,
                           skipkeys=False,
                           ensure_ascii=False,
                           sort_keys=True)
Serialize a dict into a JSON formatted string.

This function enforces rules like the separator and order of keys. This ensures that all dicts are serialized in the same way. This is specially important for hashing data. We need to make sure that everyone serializes their data in the same way so that we do not have hash mismatches for the same structure due to serialization differences.

Args:
    data (dict): dict to serialize

Returns:
    str: JSON formatted string
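A quick usage sketch; the exact whitespace in the output is an assumption based on python-rapidjson's compact defaults:

    serialize({'b': 1, 'a': 2})
    # -> '{"a":2,"b":1}'  (keys sorted, so equal dicts serialize identically)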
def trim_wav_pydub(in_path: Path, out_path: Path,
                   start_time: int, end_time: int) -> None:
    logger.info(
        "Using pydub/ffmpeg to create {} from {}".format(out_path, in_path) +
        " using a start_time of {} and an end_time of {}".format(start_time, end_time))
    if out_path.is_file():
        return

    in_ext = in_path.suffix[1:]
    out_ext = out_path.suffix[1:]
    audio = AudioSegment.from_file(str(in_path), in_ext)
    trimmed = audio[start_time:end_time]
    # The ffmpeg parameters export mono (-ac 1) 16 kHz (-ar 16000) audio.
    trimmed.export(str(out_path), format=out_ext,
                   parameters=["-ac", "1", "-ar", "16000"])
Crops the wav file; `start_time` and `end_time` are in milliseconds (pydub slice semantics).
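A usage sketch (file names are illustrative):

    from pathlib import Path

    # Keep seconds 1-5 of the input, written as mono 16 kHz audio.
    trim_wav_pydub(Path('interview.wav'), Path('clip.wav'),
                   start_time=1000, end_time=5000)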
def analyzer_api(url):
    response.content_type = JSON_MIME

    ri = get_cached_or_new(url)
    try:
        if ri.is_old():
            logger.info("Running the analysis.")
            ri = get_cached_or_new(url, new=True)
            ri.paralel_processing()
    except (requests.exceptions.Timeout, requests.ConnectionError) as e:
        # NOTE: this formats an `error_msg` template whose definition is not
        # part of this snippet.
        error_msg = error_msg.format(
            url=url,
            timeout=REQUEST_TIMEOUT,
            message=str(e.message)
        )
        logger.error(error_msg)
        return {
            "status": False,
            "log": "",
            "error": error_msg,
        }
    except Exception as e:
        error_msg = str(e.message) + "\n" + traceback.format_exc().strip()
        logger.error(error_msg)
        return {
            "status": False,
            "log": "ri.get_log()",
            "error": error_msg,
        }

    return {
        "status": True,
        "body": ri.to_dict(),
        "log": "ri.get_log()",
    }
Analyze given `url` and return output as JSON.
def shape_offset_y(self):
    min_y = self._start_y
    for drawing_operation in self:
        if hasattr(drawing_operation, 'y'):
            min_y = min(min_y, drawing_operation.y)
    return min_y
Return y distance of shape origin from local coordinate origin. The returned integer represents the topmost extent of the freeform shape, in local coordinates. Note that the bounding box of the shape need not start at the local origin.
def list(self, id=None):
    args = {'id': id}
    self._job_chk.check(args)
    return self._client.json('job.list', args)
List all running jobs.

:param id: optional ID for the job to list
def get_text(self):
    self._load_raw_content()
    if self._text is None:
        assert self._raw_content is not None
        ret_cont = self._raw_content
        if self.compressed:
            ret_cont = zlib.decompress(ret_cont, zlib.MAX_WBITS + 16)
        if self.encoded:
            ret_cont = ret_cont.decode('utf-8')
        self._text = ret_cont
    assert self._text is not None
    return self._text
Get the loaded, decompressed, and decoded text of this content.
def _fmtos(self):
    plotters = self.plotters
    if len(plotters) == 0:
        return {}
    p0 = plotters[0]
    if len(plotters) == 1:
        return p0._fmtos
    return (getattr(p0, key) for key in set(p0).intersection(
        *map(set, plotters[1:])))
An iterator over formatoption objects. Contains only the formatoptions whose keys are in all plotters in this list.
def take_bug_reports(ads, test_name, begin_time, destination=None):
    begin_time = mobly_logger.normalize_log_line_timestamp(str(begin_time))

    def take_br(test_name, begin_time, ad, destination):
        ad.take_bug_report(test_name, begin_time, destination=destination)

    args = [(test_name, begin_time, ad, destination) for ad in ads]
    utils.concurrent_exec(take_br, args)
Takes bug reports on a list of android devices.

If you want to take a bug report, call this function with a list of android_device objects in on_fail. Bug reports will be taken on all the devices in the list concurrently. Bug reports take a relatively long time to take, so use this cautiously.

Args:
    ads: A list of AndroidDevice instances.
    test_name: Name of the test method that triggered this bug report.
    begin_time: timestamp taken when the test started, can be either string or int.
    destination: string, path to the directory where the bugreport should be saved.
def _dlog(self, msg, indent_increase=0):
    self._log.debug("interp", msg, indent_increase,
                    filename=self._orig_filename, coord=self._coord)
Log the message to the debug log.
def scoped_format(txt, **objects):
    pretty = objects.pop("pretty", RecursiveAttribute.format_pretty)
    expand = objects.pop("expand", RecursiveAttribute.format_expand)
    attr = RecursiveAttribute(objects, read_only=True)
    formatter = scoped_formatter(**objects)
    return formatter.format(txt, pretty=pretty, expand=expand)
Format a string with respect to a set of objects' attributes.

Example:

    >>> class Foo(object):
    >>>     def __init__(self):
    >>>         self.name = "Dave"
    >>> print scoped_format("hello {foo.name}", foo=Foo())
    hello Dave

Args:
    objects (dict): Dict of objects to format with. If a value is a dict, its values, and any further nested dicts, will also format with dot notation.
    pretty (bool): See `ObjectStringFormatter`.
    expand (bool): See `ObjectStringFormatter`.
def _update_callsafety(self, response):
    if self.ratelimit is not None:
        self.callsafety['lastcalltime'] = time()
        self.callsafety['lastlimitremaining'] = int(
            response.headers.get('X-Rate-Limit-Remaining', 0))
Update the callsafety data structure
def append_logs_to_result_object(result_obj, result):
    logs = result.has_logs()
    result_obj["exec"]["logs"] = []
    if logs and result.logfiles:
        for log in logs:
            typ = None
            parts = log.split(os.sep)
            if "bench" in parts[len(parts) - 1]:
                typ = "framework"
            if typ is not None:
                name = parts[len(parts) - 1]
                try:
                    with open(log, "r") as file_name:
                        data = file_name.read()
                    dic = {"data": data, "name": name, "from": typ}
                    result_obj["exec"]["logs"].append(dic)
                except OSError:
                    pass
            else:
                continue
Append log files to cloud result object from Result.

:param result_obj: Target result object
:param result: Result
:return: Nothing, modifies result_obj in place.
def load_manifests(self):
    for path in self.plugin_paths:
        for item in os.listdir(path):
            item_path = os.path.join(path, item)
            if os.path.isdir(item_path):
                self.load_manifest(item_path)
Loads all plugin manifests on the plugin path
def _get_YYTfactor(self, Y):
    N, D = Y.shape
    if N >= D:
        return Y.view(np.ndarray)
    else:
        return jitchol(tdot(Y))
Find a matrix L which satisfies LL^T = YY^T. Note that L may have fewer columns than Y.
def setup(options):
    sys.path.insert(0, options.gae_lib_path)
    from dev_appserver import fix_sys_path
    fix_sys_path()
Grabs the gae_lib_path from the options and inserts it at the first index of sys.path. Then calls GAE's fix_sys_path to get all the proper GAE paths included.

:param options:
def build_attachment2():
    attachment = Attachment()
    attachment.content = "BwdW"
    attachment.type = "image/png"
    attachment.filename = "banner.png"
    attachment.disposition = "inline"
    attachment.content_id = "Banner"
    return attachment
Build attachment mock.
def update(self):
    response = requests.get(self.update_url, timeout=timeout)
    match = ip_pattern.search(response.content)
    if not match:
        raise ApiError("Couldn't parse the server's response", response.content)
    self.ip = match.group(0)
Updates remote DNS record by requesting its special endpoint URL
def fib(n):
    assert n > 0
    a, b = 1, 1
    for i in range(n - 1):
        a, b = b, a + b
    return a
Fibonacci example function.

Args:
    n (int): integer

Returns:
    int: n-th Fibonacci number
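For example, with this 1-indexed convention:

    [fib(n) for n in range(1, 8)]  # -> [1, 1, 2, 3, 5, 8, 13]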
def _download_wrapper(self, url, *args, **kwargs):
    try:
        return url, self._file_downloader.download(url, *args, **kwargs)
    except Exception as e:
        logging.error("AbstractDownloader: %s", traceback.format_exc())
        return url, e
Actual download call. Calls the underlying file downloader, catches all exceptions and returns the result.
def username(self):
    token = self.session.params.get('access_token')
    if not token:
        raise errors.TokenError(
            "session does not have a valid access_token param")
    data = token.split('.')[1]
    # Normalize base64url to standard base64 and overpad before decoding.
    data = data.replace('-', '+').replace('_', '/') + "==="
    try:
        return json.loads(base64.b64decode(data).decode('utf-8'))['u']
    except (ValueError, KeyError):
        raise errors.TokenError(
            "access_token does not contain username")
The username in the service's access token.

Returns
-------
str
def _freebayes_custom(in_file, ref_file, data):
    if vcfutils.get_paired_phenotype(data):
        return None
    config = data["config"]
    bv_ver = programs.get_version("bcbio_variation", config=config)
    if LooseVersion(bv_ver) < LooseVersion("0.1.1"):
        return None
    out_file = "%s-filter%s" % os.path.splitext(in_file)
    if not utils.file_exists(out_file):
        tmp_dir = utils.safe_makedir(os.path.join(os.path.dirname(in_file), "tmp"))
        resources = config_utils.get_resources("bcbio_variation", config)
        jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
        java_args = ["-Djava.io.tmpdir=%s" % tmp_dir]
        cmd = ["bcbio-variation"] + jvm_opts + java_args + \
              ["variant-filter", "freebayes", in_file, ref_file]
        do.run(cmd, "Custom FreeBayes filtering using bcbio.variation")
    return out_file
Custom FreeBayes filtering using bcbio.variation, tuned to human NA12878 results. Experimental: for testing new methods.
def find_next(self):
    state = self.find(changed=False, forward=True,
                      rehighlight=False, multiline_replace_check=False)
    self.editor.setFocus()
    self.search_text.add_current_text()
    return state
Find next occurrence
def upload(self, file_path, timeout=-1):
    return self._client.upload(file_path, timeout=timeout)
Upload an SPP ISO image file or a hotfix file to the appliance.

The API supports upload of one hotfix at a time into the system. For the successful upload of a hotfix, ensure its original name and extension are not altered.

Args:
    file_path: Full path to firmware.
    timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion.

Returns:
    dict: Information about the updated firmware bundle.
def readBatchTupleQuotes(self, symbols, start, end):
    if end is None:
        end = sys.maxint  # Python 2; use sys.maxsize on Python 3

    ret = {}
    session = self.getReadSession()()
    try:
        symbolChunks = splitListEqually(symbols, 100)
        for chunk in symbolChunks:
            rows = session.query(Quote.symbol, Quote.time, Quote.close,
                                 Quote.volume, Quote.low, Quote.high).filter(
                and_(Quote.symbol.in_(chunk),
                     Quote.time >= int(start),
                     Quote.time < int(end)))
            for row in rows:
                if row.time not in ret:
                    ret[row.time] = {}
                ret[row.time][row.symbol] = self.__sqlToTupleQuote(row)
    finally:
        self.getReadSession().remove()

    return ret
Read batch quotes as tuples to save memory.
def update(self, old, new):
    i = self.rank[old]
    del self.rank[old]
    self.heap[i] = new
    self.rank[new] = i
    if old < new:
        self.down(i)  # the new value is larger, sift it down
    else:
        self.up(i)    # the new value is smaller, sift it up
Replace an element in the heap
def asset_create(self, name, items, tag='', description='', atype='static'):
    data = {
        'name': name,
        'description': description,
        'type': atype,
        'tags': tag
    }
    if atype == 'static':
        data['definedIPs'] = ','.join(items)
    if atype == 'dns':
        data['type'] = 'dnsname'
        data['definedDNSNames'] = ' '.join(items)
    return self.raw_query('asset', 'add', data=data)
asset_create name, items, tag, description

Create a new asset list with the defined information.

UN-DOCUMENTED CALL: This function is not considered stable.

:param name: asset list name (must be unique)
:type name: string
:param items: list of IP Addresses, CIDR, and Network Ranges
:type items: list
:param tag: The tag associated to the asset list
:type tag: string
:param description: The Asset List description
:type description: string
def listen_until_return(self, *temporary_handlers, timeout=0):
    start = time.time()
    while timeout == 0 or time.time() - start < timeout:
        res = self.listen(*temporary_handlers)
        if res is not None:
            return res
Calls listen repeatedly until listen returns something other than None, then returns listen's result. If timeout is not zero, listen_until_return stops after timeout seconds and returns None.
def _getsolution(self, config, section, **kwargs):
    if section not in config:
        raise ValueError('Section [{}] not found in [{}]'.format(
            section, ', '.join(config.sections())))

    s = VSGSolution(**kwargs)
    s.Name = config.get(section, 'name', fallback=s.Name)
    s.FileName = os.path.normpath(config.get(section, 'filename', fallback=s.FileName))
    s.VSVersion = config.getfloat(section, 'visual_studio_version', fallback=s.VSVersion)
    if not s.VSVersion:
        raise ValueError('Solution section [%s] requires a value for Visual Studio Version (visual_studio_version)' % section)

    project_sections = config.getlist(section, 'projects', fallback=[])
    for project_section in project_sections:
        project = self._getproject(config, project_section, VSVersion=s.VSVersion)
        s.Projects.append(project)
    return s
Creates a VSG solution from a configparser instance.

:param object config: The instance of the configparser class
:param str section: The section name to read.
:param kwargs: List of additional keyworded arguments to be passed into the VSGSolution.
:return: A valid VSGSolution instance if successful; None otherwise.
async def connect(self):
    self.tls_context = None
    if self.tls:
        self.tls_context = self.create_tls_context()

    (self.reader, self.writer) = await asyncio.open_connection(
        host=self.hostname,
        port=self.port,
        local_addr=self.source_address,
        ssl=self.tls_context,
        loop=self.eventloop
    )
Connect to target.
def pkgdb(opts):
    return LazyLoader(
        _module_dirs(
            opts,
            'pkgdb',
            base_path=os.path.join(SALT_BASE_PATH, 'spm')
        ),
        opts,
        tag='pkgdb'
    )
Return modules for SPM's package database.

.. versionadded:: 2015.8.0
async def close(self, code: int = 1006, reason: str = "Connection closed"):
    if self._closed:
        return
    self._closed = True

    if self._scope is not None:
        await self._scope.cancel()

    data = self._connection.send(CloseConnection(code=code, reason=reason))
    await self._sock.send_all(data)
    await self._sock.close()
Closes the websocket.
def create_info(name, info_type, url=None, parent=None, id=None,
                context=ctx_default, store=False):
    id = str(uuid4()) if id is None else id
    pubsub = _pubsub_key(id)

    info = {'id': id,
            'type': info_type,
            'pubsub': pubsub,
            'url': url,
            'parent': parent,
            'context': context,
            'name': name,
            'status': 'Queued' if info_type == 'job' else None,
            'date_start': None,
            'date_end': None,
            'date_created': str(datetime.now()),
            'result': None}

    if store:
        r_client.set(id, json_encode(info))

    if parent is not None:
        r_client.sadd(_children_key(parent), id)

    return info
Create and return an info dict describing a job or group.
def preview(pid, record, template=None, **kwargs):
    fileobj = current_previewer.record_file_factory(
        pid, record, request.view_args.get(
            'filename', request.args.get('filename', type=str))
    )
    if not fileobj:
        abort(404)

    try:
        file_previewer = fileobj['previewer']
    except KeyError:
        file_previewer = None

    fileobj = PreviewFile(pid, record, fileobj)
    for plugin in current_previewer.iter_previewers(
            previewers=[file_previewer] if file_previewer else None):
        if plugin.can_preview(fileobj):
            try:
                return plugin.preview(fileobj)
            except Exception:
                current_app.logger.warning(
                    ('Preview failed for {key}, in {pid_type}:{pid_value}'
                     .format(key=fileobj.file.key,
                             pid_type=fileobj.pid.pid_type,
                             pid_value=fileobj.pid.pid_value)),
                    exc_info=True)
    return default.preview(fileobj)
Preview file for given record.

Plug this method into your ``RECORDS_UI_ENDPOINTS`` configuration:

.. code-block:: python

    RECORDS_UI_ENDPOINTS = dict(
        recid=dict(
            # ...
            route='/records/<pid_value>/preview/<path:filename>',
            view_imp='invenio_previewer.views.preview',
            record_class='invenio_records_files.api:Record',
        )
    )
def _explode_raster(raster, band_names=[]):
    if not band_names:
        band_names = raster.band_names
    else:
        band_names = list(IndexedSet(raster.band_names).intersection(band_names))
    return [_Raster(image=raster.bands_data([band_name]), band_names=[band_name])
            for band_name in band_names]
Splits a multiband raster into a list of single-band rasters.
def _GetMessage(self, event_object):
    formatter_mediator = formatters_mediator.FormatterMediator()

    result = ''
    try:
        result, _ = formatters_manager.FormattersManager.GetMessageStrings(
            formatter_mediator, event_object)
    except KeyError as exception:
        logging.warning(
            'Unable to correctly assemble event with error: {0!s}'.format(
                exception))

    return result
Returns a properly formatted message string.

Args:
    event_object: the event object (instance of EventObject).

Returns:
    A formatted message string.
def break_bond(self, ind1, ind2, tol=0.2):
    sites = self._sites
    clusters = [[sites[ind1]], [sites[ind2]]]

    sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]

    def belongs_to_cluster(site, cluster):
        for test_site in cluster:
            if CovalentBond.is_bonded(site, test_site, tol=tol):
                return True
        return False

    while len(sites) > 0:
        unmatched = []
        for site in sites:
            for cluster in clusters:
                if belongs_to_cluster(site, cluster):
                    cluster.append(site)
                    break
            else:
                unmatched.append(site)
        if len(unmatched) == len(sites):
            raise ValueError("Not all sites are matched!")
        sites = unmatched

    return (self.__class__.from_sites(cluster) for cluster in clusters)
Returns two molecules based on breaking the bond between atoms at index ind1 and ind2.

Args:
    ind1 (int): Index of first site.
    ind2 (int): Index of second site.
    tol (float): Relative tolerance to test. Basically, the code checks if the distance between the sites is less than (1 + tol) * typical bond distances. Defaults to 0.2, i.e., 20% longer.

Returns:
    Two Molecule objects representing the two clusters formed from breaking the bond.
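A usage sketch, assuming this is pymatgen's Molecule.break_bond; the water geometry below is illustrative:

    from pymatgen.core.structure import Molecule

    # Water: breaking one O-H bond leaves an OH cluster and a lone H.
    water = Molecule(['O', 'H', 'H'],
                     [[0.0, 0.0, 0.0],
                      [0.0, 0.757, 0.587],
                      [0.0, -0.757, 0.587]])
    oh, h = water.break_bond(0, 1)  # clusters grown around sites 0 and 1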
def data_transforms_mnist(args, mnist_mean=None, mnist_std=None):
    if mnist_mean is None:
        mnist_mean = [0.5]
    if mnist_std is None:
        mnist_std = [0.5]

    train_transform = transforms.Compose(
        [
            transforms.RandomCrop(28, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mnist_mean, mnist_std),
        ]
    )
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    valid_transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mnist_mean, mnist_std)]
    )
    return train_transform, valid_transform
Build train and validation data transforms for the MNIST dataset.
def lf(self):
    old_r = self.cur_r
    self.cursor_down()
    if old_r == self.cur_r:
        self.scroll_up()
        self.erase_line()
This moves the cursor down with scrolling.
def value(self, raw_value):
    try:
        return decimal.Decimal(raw_value)
    except decimal.InvalidOperation:
        raise ValueError(
            "Could not parse '{}' value as decimal".format(raw_value)
        )
Decode param as decimal value.
def total_scores_in(self, leaderboard_name):
    # Sum over the named leaderboard (the argument, not self.leaderboard_name).
    return sum([leader[self.SCORE_KEY]
                for leader in self.all_leaders_from(leaderboard_name)])
Sum of scores for all members in the named leaderboard.

@param leaderboard_name Name of the leaderboard.
@return Sum of scores for all members in the named leaderboard.
def getParentElementCustomFilter(self, filterFunc):
    parentNode = self.parentNode
    while parentNode:
        if filterFunc(parentNode) is True:
            return parentNode
        parentNode = parentNode.parentNode
    return None
getParentElementCustomFilter - Runs through parents on up to document root, returning the first tag for which filterFunc(tag) returns True.

@param filterFunc <function/lambda> - A function or lambda expression that should return "True" if the passed node matches criteria.

@return <AdvancedTag/None> - First match, or None

@see getFirstElementCustomFilter for matches against children
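For illustration, a usage sketch in the style of AdvancedHTMLParser's AdvancedTag API (the tag variable is hypothetical):

    # Find the nearest ancestor that is a <div> carrying a class attribute.
    container = tag.getParentElementCustomFilter(
        lambda parent: parent.tagName == 'div' and parent.hasAttribute('class'))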
def _GenApiConfigCallback(args, api_func=GenApiConfig):
    service_configs = api_func(args.service,
                               hostname=args.hostname,
                               application_path=args.application)

    for api_name_version, config in service_configs.iteritems():
        _WriteFile(args.output, api_name_version + '.api', config)
Generate an api file.

Args:
    args: An argparse.Namespace object to extract parameters from.
    api_func: A function that generates and returns an API configuration for a list of services.