code: string, 51 to 2.38k characters
docstring: string, 4 to 15.2k characters
def variable_iter(self, base):
    base_substs = dict(('<' + t + '>', u) for (t, u) in base.items())
    substs = []
    vals = []
    for with_defn in self.with_exprs:
        substs.append('<' + with_defn[0] + '>')
        vals.append(Host.expand_with(with_defn[1:]))
    for val_tpl in product(*vals):
        r = base_substs.copy()
        r.update(dict(zip(substs, val_tpl)))
        yield r
returns iterator over the cross product of the variables for this stanza
def transform(self, vector): if isinstance(vector, RDD): vector = vector.map(_convert_to_vector) else: vector = _convert_to_vector(vector) return callMLlibFunc("elementwiseProductVector", self.scalingVector, vector)
Computes the Hadamard product of the vector.
def add_nodes(self, nodes): if not isinstance(nodes, list): add_list = [nodes] else: add_list = nodes self.node_list.extend(add_list)
Add a given node or list of nodes to self.node_list. Args: node (Node or list[Node]): the node or list of nodes to add to the graph Returns: None Examples: Adding one node: :: >>> from blur.markov.node import Node >>> graph = Graph() >>> node_1 = Node('One') >>> graph.add_nodes(node_1) >>> print([node.value for node in graph.node_list]) ['One'] Adding multiple nodes at a time in a list: :: >>> from blur.markov.node import Node >>> graph = Graph() >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> graph.add_nodes([node_1, node_2]) >>> print([node.value for node in graph.node_list]) ['One', 'Two']
async def jsk_git(self, ctx: commands.Context, *, argument: CodeblockConverter): return await ctx.invoke(self.jsk_shell, argument=Codeblock(argument.language, "git " + argument.content))
Shortcut for 'jsk sh git'. Invokes the system shell.
def unpickle(pickle_file):
    pickle = None
    with open(pickle_file, "rb") as pickle_f:
        pickle = dill.load(pickle_f)
    if not pickle:
        LOG.error("Could not load python object from file")
    return pickle
Unpickle a python object from the given path.
def _get_hanging_wall_coeffs_mag(self, C, mag):
    if mag < 5.5:
        return 0.0
    elif mag > 6.5:
        return 1.0 + C["a2"] * (mag - 6.5)
    else:
        return (mag - 5.5) * (1.0 + C["a2"] * (mag - 6.5))
Returns the hanging wall magnitude term defined in equation 14
def initialize(cls) -> None: if cls._initialized: return io_loop = ioloop.IOLoop.current() cls._old_sigchld = signal.signal( signal.SIGCHLD, lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup), ) cls._initialized = True
Initializes the ``SIGCHLD`` handler. The signal handler is run on an `.IOLoop` to avoid locking issues. Note that the `.IOLoop` used for signal handling need not be the same one used by individual Subprocess objects (as long as the ``IOLoops`` are each running in separate threads). .. versionchanged:: 5.0 The ``io_loop`` argument (deprecated since version 4.1) has been removed. Availability: Unix
def get_aws_secrets_from_env(): keys = set() for env_var in ( 'AWS_SECRET_ACCESS_KEY', 'AWS_SECURITY_TOKEN', 'AWS_SESSION_TOKEN', ): if env_var in os.environ: keys.add(os.environ[env_var]) return keys
Extract AWS secrets from environment variables.
def set_widgets(self): self.tblFunctions1.horizontalHeader().setSectionResizeMode( QHeaderView.Stretch) self.tblFunctions1.verticalHeader().setSectionResizeMode( QHeaderView.Stretch) self.populate_function_table_1()
Set widgets on the Impact Functions Table 1 tab.
def load_stylesheet(pyside=True):
    # import the resource module for the requested binding
    if pyside:
        import qdarkstyle.pyside_style_rc
    else:
        import qdarkstyle.pyqt_style_rc
    if not pyside:
        from PyQt4.QtCore import QFile, QTextStream
    else:
        from PySide.QtCore import QFile, QTextStream
    f = QFile(":qdarkstyle/style.qss")
    if not f.exists():
        _logger().error("Unable to load stylesheet, file not found in "
                        "resources")
        return ""
    else:
        f.open(QFile.ReadOnly | QFile.Text)
        ts = QTextStream(f)
        stylesheet = ts.readAll()
        if platform.system().lower() == 'darwin':
            # the QSS snippet assigned here (a macOS-specific style fix) was
            # truncated in the source; the original appends it to the sheet
            mac_fix = ''
            stylesheet += mac_fix
        return stylesheet
Loads the stylesheet. Takes care of importing the rc module. :param pyside: True to load the pyside rc file, False to load the PyQt rc file :return the stylesheet string
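A minimal usage sketch, assuming a PyQt4 environment with an existing QApplication; the QSS string returned above is applied application-wide:

from PyQt4.QtGui import QApplication

app = QApplication([])
app.setStyleSheet(load_stylesheet(pyside=False))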
def set_image(self, image = None): if image is None or type(image) is not int: raise KPError("Need a new image number") else: self.image = image self.last_mod = datetime.now().replace(microsecond=0) return True
This method is used to set the image number. image must be an unsigned int.
def _extend_word(self, word, length, prefix=0, end=False, flatten=False):
    if len(word) == length:
        if end and "<" not in self[word[-1]]:
            raise GenerationError(word + " cannot be extended")
        else:
            return word
    else:
        exclude = {"<"}
        while True:
            choices = self.weighted_choices(word[-prefix if prefix > 0 else 0:],
                                            exclude=exclude, flatten=flatten)
            if not choices:
                raise GenerationError(word + " cannot be extended")
            character = random_weighted_choice(choices)
            word += character
            try:
                word = self._extend_word(word, length, prefix=prefix,
                                         end=end, flatten=flatten)
                return word
            except GenerationError:
                exclude.add(character)
                word = word[:-1]
Extend the given word with a random suffix up to length. :param length: the length of the extended word; >= len(word); :param prefix: if greater than 0, the maximum length of the prefix to consider to choose the next character; :param end: if True, the generated word ends as a word of table; :param flatten: whether or not consider the table as flattened; :return: a random word of length generated from table, extending word. :raises GenerationError: if the generated word cannot be extended to length.
def _merge(x, y):
    merged = {**x, **y}
    xkeys = x.keys()
    for key in xkeys:
        if isinstance(x[key], dict) and key in y:
            merged[key] = _merge(x[key], y[key])
    return merged
Merge two nested dictionaries. Overwrite values in x with values in y.
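A small hypothetical usage sketch of _merge: nested keys present in both dicts are merged recursively, while values from y win at the leaves.

x = {"db": {"host": "localhost", "port": 5432}, "debug": False}
y = {"db": {"port": 5433}, "debug": True}
print(_merge(x, y))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}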
def set_environment_variable(self, key, val): if self.get_environment_variable(key) in [None, val]: self.__dict__['environment_variables'][key] = val else: raise Contradiction("Could not set environment variable %s" % (key))
Sets a variable if that variable is not already set
def call(command, collect_missing=False, silent=True):
    return (_execCommand if silent else execCommand)(shlex.split(command), collect_missing)
r"""Calls a task, as if it were called from the command line. Args: command (str): A route followed by params (as if it were entered in the shell). collect_missing (bool): Collects any missing argument for the command through the shell. Defaults to False. Returns: The return value of the called command.
def _sort_modules(mods):
    def compare(x, y):
        x = x[1]
        y = y[1]
        if x == y:
            return 0
        if y.stem == "__init__.py":
            return 1
        if x.stem == "__init__.py" or x < y:
            return -1
        return 1
    return sorted(mods, key=cmp_to_key(compare))
Always sort `index` or `README` as first filename in list.
def update(self, byte_arr): if byte_arr: self.value = self.calculate(byte_arr, self.value)
Read bytes and update the CRC computed.
def match_patterns(codedata):
    ret = {}
    for index1, pattern in enumerate(shaman.PatternMatcher.PATTERNS):
        print('Matching pattern %d "%s"' % (index1 + 1, pattern))
        matcher = shaman.PatternMatcher(pattern)
        tmp = {}
        for index2, (language, code) in enumerate(codedata):
            if language not in shaman.SUPPORTING_LANGUAGES:
                continue
            if len(code) <= 20 or len(code) > 100000:
                continue
            if language not in tmp:
                tmp[language] = []
            ratio = matcher.getratio(code)
            tmp[language].append(ratio)
            print('Matching patterns %d/%d ' % (index2, len(codedata)), end='\r')
        ret[pattern] = {}
        for language, data in tmp.items():
            ret[pattern][language] = sum(tmp[language]) / max(len(tmp[language]), 1)
    print('Matching patterns completed ')
    return ret
Match patterns with shaman.PatternMatcher and get the average ratio per pattern and language.
def find_triangles(self): return list(filter(lambda x: len(x) == 3, nx.find_cliques(self.model)))
Finds all the triangles present in the given model Examples -------- >>> from pgmpy.models import MarkovModel >>> from pgmpy.factors.discrete import DiscreteFactor >>> from pgmpy.inference import Mplp >>> mm = MarkovModel() >>> mm.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7']) >>> mm.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'), ... ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'), ... ('x4', 'x7'), ('x5', 'x7')]) >>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in mm.edges()] >>> mm.add_factors(*phi) >>> mplp = Mplp(mm) >>> mplp.find_triangles()
def write_numeric(fmt, value, buff, byteorder='big'):
    try:
        buff.write(fmt[byteorder].pack(value))
    except KeyError as exc:
        raise ValueError('Invalid byte order') from exc
Write a numeric value to a file-like object.
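A usage sketch under the assumption that fmt is a mapping from byte order to a pre-compiled struct.Struct, matching the fmt[byteorder].pack(value) lookup in the code above; the format table here is hypothetical.

import struct
from io import BytesIO

INT32 = {'big': struct.Struct('>i'), 'little': struct.Struct('<i')}  # assumed format table

buff = BytesIO()
write_numeric(INT32, 258, buff, byteorder='big')
print(buff.getvalue())  # b'\x00\x00\x01\x02'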
def fetch(self): params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return EntityInstance( self._version, payload, service_sid=self._solution['service_sid'], identity=self._solution['identity'], )
Fetch an EntityInstance :returns: Fetched EntityInstance :rtype: twilio.rest.authy.v1.service.entity.EntityInstance
def ft1file(self, **kwargs): kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.ft1file_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
return the name of the input ft1 file list
def get_terms(self, field=None): if not field: raise AttributeError("Please provide field to apply aggregation to!") agg = A("terms", field=field, size=self.size, order={"_count": "desc"}) self.aggregations['terms_' + field] = agg return self
Create a terms aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods
def put(self, item, block=True, timeout=None): return self._queue.put(item, block, timeout)
Put item into underlying queue.
def search(self, name, value):
    partial = None
    header_name_search_result = CocaineHeaders.STATIC_TABLE_MAPPING.get(name)
    if header_name_search_result:
        index = header_name_search_result[1].get(value)
        if index is not None:
            return index, name, value
        partial = (header_name_search_result[0], name, None)
    offset = len(CocaineHeaders.STATIC_TABLE)
    for (i, (n, v)) in enumerate(self.dynamic_entries):
        if n == name:
            if v == value:
                return i + offset + 1, n, v
            elif partial is None:
                partial = (i + offset + 1, n, None)
    return partial
Searches the table for the entry specified by name and value Returns one of the following: - ``None``, no match at all - ``(index, name, None)`` for partial matches on name only. - ``(index, name, value)`` for perfect matches.
def clear_rr_ce_entries(self): if not self._initialized: raise pycdlibexception.PyCdlibInternalError('This Primary Volume Descriptor is not yet initialized') for block in self.rr_ce_blocks: block.set_extent_location(-1)
A method to clear out all of the extent locations of all Rock Ridge Continuation Entries that the PVD is tracking. This can be used to reset all data before assigning new data. Parameters: None. Returns: Nothing.
def arches(self): if self.method == 'image': return self.params[2] if self.arch: return [self.arch] return []
Return a list of architectures for this task. :returns: a list of arch strings (eg ["ppc64le", "x86_64"]). The list is empty if this task has no arches associated with it.
def rotation_matrix(d):
    sin_angle = np.linalg.norm(d)
    if sin_angle == 0:
        return np.identity(3)
    d /= sin_angle
    eye = np.eye(3)
    ddt = np.outer(d, d)
    skew = np.array([[0, d[2], -d[1]],
                     [-d[2], 0, d[0]],
                     [d[1], -d[0], 0]], dtype=np.float64)
    M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
    return M
Calculates a rotation matrix given a vector d. The direction of d corresponds to the rotation axis. The length of d corresponds to the sin of the angle of rotation. Variant of: http://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
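A short sketch of how rotation_matrix might be called: the vector's direction is the rotation axis and its length is sin(angle), so |d| = sin(90 degrees) = 1 about the z axis here.

import numpy as np

d = np.array([0.0, 0.0, 1.0])
M = rotation_matrix(d.copy())  # pass a copy: the function normalizes d in place
print(np.round(M @ np.array([1.0, 0.0, 0.0]), 6))  # [ 0. -1.  0.] with this skew sign convention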
def Run(self, args): try: directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) except (IOError, OSError) as e: self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e) return files = list(directory.ListFiles()) files.sort(key=lambda x: x.pathspec.path) for response in files: self.SendReply(response)
Lists a directory.
def _get_cache_key(self, args, kwargs):
    hash_input = json.dumps({'name': self.name, 'args': args, 'kwargs': kwargs},
                            sort_keys=True)
    return hashlib.md5(hash_input).hexdigest()
Returns key to be used in cache
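A standalone sketch of the same idea; note that hashlib.md5 needs bytes on Python 3, so the JSON string is encoded first (the method above passes a str, which only works on Python 2).

import hashlib
import json

def cache_key(name, args, kwargs):
    # serialize deterministically, then hash
    hash_input = json.dumps({'name': name, 'args': args, 'kwargs': kwargs},
                            sort_keys=True)
    return hashlib.md5(hash_input.encode('utf-8')).hexdigest()

print(cache_key('fetch', [1, 2], {'page': 3}))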
def _build_circle(self):
    total_weight = 0
    for node in self._nodes:
        total_weight += self._weights.get(node, 1)
    for node in self._nodes:
        weight = self._weights.get(node, 1)
        ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
        for i in xrange(0, int(ks)):
            b_key = self._md5_digest('%s-%s-salt' % (node, i))
            for l in xrange(0, 4):
                key = ((b_key[3 + l * 4] << 24)
                       | (b_key[2 + l * 4] << 16)
                       | (b_key[1 + l * 4] << 8)
                       | b_key[l * 4])
                self._hashring[key] = node
                self._sorted_keys.append(key)
    self._sorted_keys.sort()
Creates hash ring.
def get_single_payload(self, query_obj): payload = self.get_df_payload(query_obj) df = payload.get('df') status = payload.get('status') if status != utils.QueryStatus.FAILED: if df is not None and df.empty: payload['error'] = 'No data' else: payload['data'] = self.get_data(df) if 'df' in payload: del payload['df'] return payload
Returns a payload of metadata and data
def make_url(self, method): token = self.settings()['token'] return TELEGRAM_URL.format( token=quote(token), method=quote(method), )
Generate a Telegram URL for this bot.
def calculate_windows(self, **kwargs): windows = find_windows(self.elements, self.coordinates, **kwargs) if windows: self.properties.update( { 'windows': { 'diameters': windows[0], 'centre_of_mass': windows[1], } } ) return windows[0] else: self.properties.update( {'windows': {'diameters': None, 'centre_of_mass': None, }} ) return None
Return the diameters of all windows in a molecule. This function first finds and then measures the diameters of all the window in the molecule. Returns ------- :class:`numpy.array` An array of windows' diameters. :class:`NoneType` If no windows were found.
def pass_job(db: JobDB, result_queue: Queue, always_cache=False):
    @pull
    def pass_job_stream(job_source):
        result_sink = result_queue.sink()
        for message in job_source():
            if message is EndOfQueue:
                return
            key, job = message
            if always_cache or ('store' in job.hints):
                status, retrieved_result = db.add_job_to_db(key, job)
                if status == 'retrieved':
                    result_sink.send(retrieved_result)
                    continue
                elif status == 'attached':
                    continue
            yield message
    return pass_job_stream
Create a pull stream that receives jobs and passes them on to the database. If the job already has a result, that result is pushed onto the `result_queue`.
def prerequisites(self): prereqs = defaultdict(set) for input in self.inputs: spec = self._study.spec(input) if spec.is_spec and spec.derived: prereqs[spec.pipeline_getter].add(input.name) return prereqs
Iterates through the inputs of the pipeline and determines all the prerequisite pipelines
def bytes2guid(s): assert isinstance(s, bytes) u = struct.unpack v = [] v.extend(u("<IHH", s[:8])) v.extend(u(">HQ", s[8:10] + b"\x00\x00" + s[10:])) return "%08X-%04X-%04X-%04X-%012X" % tuple(v)
Converts a serialized GUID to a text GUID
def load(self, mkey, mdesc, mdict=None, merge=False): j = mdict if mdict else read_json(mdesc) if j and isinstance(j, dict): self.__meta['header'].update({mkey: mdesc}) if merge: self.__meta = dict_merge(self.__meta, j) else: self.__meta['import'][mkey] = j self.log = shell_notify( 'load %s data and %s it into meta' % ( 'got' if mdict else 'read', 'merged' if merge else 'imported' ), more=dict(mkey=mkey, mdesc=mdesc, merge=merge), verbose=self.__verbose ) return j
Loads a dictionary into current meta :param mkey: Type of data to load. Is be used to reference the data from the 'header' within meta :param mdesc: Either filename of json-file to load or further description of imported data when `mdict` is used :param dict mdict: Directly pass data as dictionary instead of loading it from a json-file. Make sure to set `mkey` and `mdesc` accordingly :param merge: Merge received data into current meta or place it under 'import' within meta :returns: The loaded (or directly passed) content
def DeleteConflict(self, conflict_link, options=None): if options is None: options = {} path = base.GetPathFromLink(conflict_link) conflict_id = base.GetResourceIdOrFullNameFromLink(conflict_link) return self.DeleteResource(path, 'conflicts', conflict_id, None, options)
Deletes a conflict. :param str conflict_link: The link to the conflict. :param dict options: The request options for the request. :return: The deleted Conflict. :rtype: dict
def process_signal(self, signum): if signum == signal.SIGTERM: LOGGER.info('Received SIGTERM, initiating shutdown') self.stop() elif signum == signal.SIGHUP: LOGGER.info('Received SIGHUP') if self.config.reload(): LOGGER.info('Configuration reloaded') logging.config.dictConfig(self.config.logging) self.on_configuration_reloaded() elif signum == signal.SIGUSR1: self.on_sigusr1() elif signum == signal.SIGUSR2: self.on_sigusr2()
Invoked whenever a signal is added to the stack. :param int signum: The signal that was added
def start(self): assert not self._started self._listening_stream.on_recv(self._recv_callback) self._started = True
Start to listen for incoming requests.
def addfield(self, pkt, buf, val): self.set_endianess(pkt) return self.fld.addfield(pkt, buf, val)
add the field with endianness to the buffer
def split_line(self):
    # assumption: the truncated find() argument is the "#" comment marker,
    # consistent with the hash_or_end name and the meta string handling below
    hash_or_end = self.line.find("#")
    temp = self.line[self.region_end:hash_or_end].strip(" |")
    self.coord_str = regex_paren.sub("", temp)
    if hash_or_end >= 0:
        self.meta_str = self.line[hash_or_end:]
    else:
        self.meta_str = ""
Split line into coordinates and meta string
def save_file_json(data, export_file): create_dir(os.path.dirname(export_file)) with open(export_file, "w") as file: json.dump(data, file, indent=4)
Write data to a json file.
def parse_metadata(cls, obj, xml): for child in xml.xpath("ti:description", namespaces=XPATH_NAMESPACES): lg = child.get("{http://www.w3.org/XML/1998/namespace}lang") if lg is not None: obj.set_cts_property("description", child.text, lg) for child in xml.xpath("ti:label", namespaces=XPATH_NAMESPACES): lg = child.get("{http://www.w3.org/XML/1998/namespace}lang") if lg is not None: obj.set_cts_property("label", child.text, lg) obj.citation = cls.CLASS_CITATION.ingest(xml, obj.citation, "ti:online/ti:citationMapping/ti:citation") for child in xml.xpath("ti:about", namespaces=XPATH_NAMESPACES): obj.set_link(RDF_NAMESPACES.CTS.term("about"), child.get('urn')) _parse_structured_metadata(obj, xml)
Parse a resource to feed the object :param obj: Obj to set metadata of :type obj: XmlCtsTextMetadata :param xml: An xml representation object :type xml: lxml.etree._Element
def create(args): with _catalog(args) as cat: for fname, created, obj in cat.create(args.args[0], {}): args.log.info('{0} -> {1} object {2.id}'.format( fname, 'new' if created else 'existing', obj))
cdstarcat create PATH Create objects in CDSTAR specified by PATH. When PATH is a file, a single object (possibly with multiple bitstreams) is created; When PATH is a directory, an object will be created for each file in the directory (recursing into subdirectories).
def interpolate_with(self, other_tf, t): if t < 0 or t > 1: raise ValueError('Must interpolate between 0 and 1') interp_translation = (1.0 - t) * self.translation + t * other_tf.translation interp_rotation = transformations.quaternion_slerp(self.quaternion, other_tf.quaternion, t) interp_tf = RigidTransform(rotation=interp_rotation, translation=interp_translation, from_frame = self.from_frame, to_frame = self.to_frame) return interp_tf
Interpolate with another rigid transformation. Parameters ---------- other_tf : :obj:`RigidTransform` The transform to interpolate with. t : float The interpolation step in [0,1], where 0 favors this RigidTransform. Returns ------- :obj:`RigidTransform` The interpolated RigidTransform. Raises ------ ValueError If t isn't in [0,1].
def norm_score(self):
    cdf = (1.0 + math.erf(self.score / math.sqrt(2.0))) / 2.0
    return 1 - 2 * math.fabs(0.5 - cdf)
Return the normalized score. Equals 1.0 for a z-score of 0, falling to 0.0 for extremely positive or negative values.
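A quick numeric check of the formula above: a z-score of 0 gives 1.0 and the value decays toward 0 as |z| grows.

import math

for z in (0.0, 1.0, 3.0):
    cdf = (1.0 + math.erf(z / math.sqrt(2.0))) / 2.0
    print(z, round(1 - 2 * math.fabs(0.5 - cdf), 4))
# 0.0 1.0
# 1.0 0.3173
# 3.0 0.0027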
def bind(self, environ): self.environ = environ self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/') self.method = environ.get('REQUEST_METHOD', 'GET').upper()
Bind a new WSGI environment. This is done automatically for the global `bottle.request` instance on every request.
def _gen_3spec(op, path, xattr=False): flags = 0 if xattr: flags |= _P.SDSPEC_F_XATTR return Spec(op, path, flags)
Returns a Spec tuple suitable for passing to the underlying C extension. This variant is called for operations that lack an input value. :param str path: The path to fetch :param bool xattr: Whether this is an extended attribute :return: a spec suitable for passing to the underlying C extension
def as_rainbow(self, offset=35, style=None, rgb_mode=False): return self._as_rainbow( ('wrapper', ), offset=offset, style=style, rgb_mode=rgb_mode, )
Wrap each frame in a Colr object, using `Colr.rainbow`.
def ci_macos(): run_command("brew install $PYTHON pipenv || echo \"Installed PipEnv\"") command_string = "sudo -H $PIP install " for element in DEPENDENCIES + REQUIREMENTS + ["-U"]: command_string += element + " " run_command(command_string) run_command("sudo -H $PYTHON setup.py bdist_wheel") assert check_wheel_existence() exit(0)
Setup Travis-CI macOS for wheel building
def clicks(self, tag=None, fromdate=None, todate=None): return self.call("GET", "/stats/outbound/clicks", tag=tag, fromdate=fromdate, todate=todate)
Gets total counts of unique links that were clicked.
def search(self, search): search = search.replace('/', ' ') params = {'q': search} return self._get_records(params)
search Zenodo record for string `search` :param search: string to search :return: Record[] results
def get_tags_users(self, id_): return _get_request(_TAGS_USERS.format(c_api=_C_API_BEGINNING, api=_API_VERSION, id_=id_, at=self.access_token))
Get a particular user which are tagged based on the id_
def _set_cursor_position(self, value): original_position = self.__cursor_position self.__cursor_position = max(0, value) return value != original_position
Set cursor position. Return whether it changed.
def git_wrapper(path): path = os.path.abspath(path) if path not in _wrapper_cache: if hasattr(Repo, 'commits'): _wrapper_cache[path] = _GitWrapperLegacy(path) else: _wrapper_cache[path] = _GitWrapper(path) return _wrapper_cache[path]
Get appropriate wrapper factory and cache instance for path
def install_versioning(self, conn): logging.info('Creating the versioning table %s', self.version_table) conn.executescript(CREATE_VERSIONING % self.version_table) self._insert_script(self.read_scripts()[0], conn)
Create the version table into an already populated database and insert the base script. :param conn: a DB API 2 connection
def job(self, name): for job in self.jobs(): if job.data.name == name: return job
Method for searching specific job by it's name. :param name: name of the job to search. :return: found job or None. :rtype: yagocd.resources.job.JobInstance
def decompress(self, chunk):
    try:
        return self._decompressobj.decompress(chunk)
    except zlib.error:
        if self._first_chunk:
            self._decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
            return self._decompressobj.decompress(chunk)
        raise
    finally:
        self._first_chunk = False
Decompress the chunk of data. :param bytes chunk: data chunk :rtype: bytes
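A standalone sketch of the fallback above, using only zlib: a zlib-wrapped decompressor is tried first, and on failure with the first chunk a raw-deflate decompressor (-MAX_WBITS) takes over.

import zlib

co = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
raw_deflate = co.compress(b"hello") + co.flush()   # raw deflate stream, no zlib header

d = zlib.decompressobj()
try:
    data = d.decompress(raw_deflate)
except zlib.error:
    d = zlib.decompressobj(-zlib.MAX_WBITS)        # fall back to raw deflate
    data = d.decompress(raw_deflate)
print(data)  # b'hello'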
def get_version_history_for_file(self, filepath):
    GIT_COMMIT_FIELDS = ['id', 'author_name', 'author_email', 'date',
                         'date_ISO_8601', 'relative_date',
                         'message_subject', 'message_body']
    GIT_LOG_FORMAT = ['%H', '%an', '%ae', '%aD', '%ai', '%ar', '%s', '%b']
    GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e'
    try:
        log = git(self.gitdir, self.gitwd, '--no-pager', 'log',
                  '--format=%s' % GIT_LOG_FORMAT,
                  '--follow', '--find-renames=100%', '--', filepath)
        log = log.strip('\n\x1e').split("\x1e")
        log = [row.strip().split("\x1f") for row in log]
        log = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in log]
    except:
        _LOG.exception('git log failed')
        raise
    return log
Return a dict representation of this file's commit history This uses specially formatted git-log output for easy parsing, as described here: http://blog.lost-theory.org/post/how-to-parse-git-log-output/ For a full list of available fields, see: http://linux.die.net/man/1/git-log
def get_themes(): styles_dir = os.path.join(package_dir, 'styles') themes = [os.path.basename(theme).replace('.less', '') for theme in glob('{0}/*.less'.format(styles_dir))] return themes
return list of available themes
def set_proxy_bypass(domains, network_service="Ethernet"): servers_str = ' '.join(domains) cmd = 'networksetup -setproxybypassdomains {0} {1}'.format(network_service, servers_str,) out = __salt__['cmd.run'](cmd) return 'error' not in out
Sets the domains that can bypass the proxy domains An array of domains allowed to bypass the proxy network_service The network service to apply the changes to, this only necessary on macOS CLI Example: .. code-block:: bash salt '*' proxy.set_proxy_bypass "['127.0.0.1', 'localhost']"
def suspend_queues(self, active_queues, sleep_time=10.0): for queue in active_queues: self.disable_queue(queue) while self.get_active_tasks(): time.sleep(sleep_time)
Suspend Celery queues and wait for running tasks to complete.
def find_all_segment(text: str, custom_dict: Trie = None) -> List[str]: if not text or not isinstance(text, str): return [] ww = list(_multicut(text, custom_dict=custom_dict)) return list(_combine(ww))
Get all possible segment variations :param str text: input string to be tokenized :return: returns list of segment variations
def create_session(self): url = self.build_url(self._endpoints.get('create_session')) response = self.con.post(url, data={'persistChanges': self.persist}) if not response: raise RuntimeError('Could not create session as requested by the user.') data = response.json() self.session_id = data.get('id') return True
Request a new session id
def get_next_action(self, request, application, label, roles):
    if label is not None:
        return HttpResponseBadRequest("<h1>Bad Request</h1>")
    actions = self.get_actions(request, application, roles)
    if request.method == "GET":
        context = self.context
        context.update({
            'application': application,
            'actions': actions,
            'state': self.name,
            'roles': roles})
        return render(
            template_name='kgapplications/common_detail.html',
            context=context,
            request=request)
    elif request.method == "POST":
        for action in actions:
            if action in request.POST:
                return action
    return HttpResponseBadRequest("<h1>Bad Request</h1>")
Django view method. We provide a default detail view for applications.
def process_pc_pathsbetween(gene_names, neighbor_limit=1,
                            database_filter=None, block_size=None):
    if not block_size:
        model = pcc.graph_query('pathsbetween', gene_names,
                                neighbor_limit=neighbor_limit,
                                database_filter=database_filter)
        if model is not None:
            return process_model(model)
    else:
        gene_blocks = [gene_names[i:i + block_size]
                       for i in range(0, len(gene_names), block_size)]
        stmts = []
        for genes1, genes2 in itertools.product(gene_blocks, repeat=2):
            if genes1 == genes2:
                bp = process_pc_pathsbetween(genes1,
                                             database_filter=database_filter,
                                             block_size=None)
            else:
                bp = process_pc_pathsfromto(genes1, genes2,
                                            database_filter=database_filter)
            stmts += bp.statements
        # note: the source is truncated here; the original presumably wraps
        # the collected statements in a processor object and returns it
Returns a BiopaxProcessor for a PathwayCommons paths-between query. The paths-between query finds the paths between a set of genes. Here source gene names are given in a single list and all directions of paths between these genes are considered. http://www.pathwaycommons.org/pc2/#graph http://www.pathwaycommons.org/pc2/#graph_kind Parameters ---------- gene_names : list A list of HGNC gene symbols to search for paths between. Examples: ['BRAF', 'MAP2K1'] neighbor_limit : Optional[int] The number of steps to limit the length of the paths between the gene names being queried. Default: 1 database_filter : Optional[list] A list of database identifiers to which the query is restricted. Examples: ['reactome'], ['biogrid', 'pid', 'psp'] If not given, all databases are used in the query. For a full list of databases see http://www.pathwaycommons.org/pc2/datasources block_size : Optional[int] Large paths-between queries (above ~60 genes) can error on the server side. In this case, the query can be replaced by a series of smaller paths-between and paths-from-to queries each of which contains block_size genes. Returns ------- bp : BiopaxProcessor A BiopaxProcessor containing the obtained BioPAX model in bp.model.
def promote16(u, fn=None, *args, **kwargs):
    dtype = np.float32 if u.dtype == np.float16 else u.dtype
    up = np.asarray(u, dtype=dtype)
    if fn is None:
        return up
    else:
        v = fn(up, *args, **kwargs)
        if isinstance(v, tuple):
            vp = tuple([np.asarray(vk, dtype=u.dtype) for vk in v])
        else:
            vp = np.asarray(v, dtype=u.dtype)
        return vp
r""" Utility function for use with functions that do not support arrays of dtype ``np.float16``. This function has two distinct modes of operation. If called with only the `u` parameter specified, the returned value is either `u` itself if `u` is not of dtype ``np.float16``, or `u` promoted to ``np.float32`` dtype if it is. If the function parameter `fn` is specified then `u` is conditionally promoted as described above, passed as the first argument to function `fn`, and the returned values are converted back to dtype ``np.float16`` if `u` is of that dtype. Note that if parameter `fn` is specified, it may not be be specified as a keyword argument if it is followed by any non-keyword arguments. Parameters ---------- u : array_like Array to be promoted to np.float32 if it is of dtype ``np.float16`` fn : function or None, optional (default None) Function to be called with promoted `u` as first parameter and \*args and \*\*kwargs as additional parameters *args Variable length list of arguments for function `fn` **kwargs Keyword arguments for function `fn` Returns ------- up : ndarray Conditionally dtype-promoted version of `u` if `fn` is None, or value(s) returned by `fn`, converted to the same dtype as `u`, if `fn` is a function
def _pull(self): pull = self.m( 'pulling remote changes', cmdd=dict(cmd='git pull --tags', cwd=self.local), critical=False ) if 'CONFLICT' in pull.get('out'): self.m( 'Congratulations! You have merge conflicts in the repository!', state=True, more=pull ) return pull
Helper function to pull from remote
def validate(self, path: str, strictness: str = "speconly") -> bool: valid1 = True with h5py.File(path, mode="r") as f: valid1 = self.validate_spec(f) if not valid1: self.errors.append("For help, see http://linnarssonlab.org/loompy/format/") valid2 = True if strictness == "conventions": with loompy.connect(path, mode="r") as ds: valid2 = self.validate_conventions(ds) if not valid2: self.errors.append("For help, see http://linnarssonlab.org/loompy/conventions/") return valid1 and valid2
Validate a file for conformance to the Loom specification Args: path: Full path to the file to be validated strictness: "speconly" or "conventions" Remarks: In "speconly" mode, conformance is assessed relative to the file format specification at http://linnarssonlab.org/loompy/format/. In "conventions" mode, conformance is additionally assessed relative to attribute name and data type conventions given at http://linnarssonlab.org/loompy/conventions/.
def do_first(self):
    pid = os.getpid()
    self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid))
    outfile = self.basename + '.pnm'
    filetype = self.file_type(self.srcfile)
    if (filetype == 'png'):
        if (self.shell_call(self.pngtopnm + ' ' + self.srcfile + ' > ' + outfile)):
            raise IIIFError(text="Oops... got error from pngtopnm.")
    elif (filetype == 'jpg'):
        if (self.shell_call(self.jpegtopnm + ' ' + self.srcfile + ' > ' + outfile)):
            raise IIIFError(text="Oops... got error from jpegtopnm.")
    else:
        raise IIIFError(code='501',
                        text='bad input file format (only know how to read png/jpeg)')
    self.tmpfile = outfile
    (self.width, self.height) = self.image_size(self.tmpfile)
Create PNM file from input image file.
def update(self, friendly_name=values.unset, assignment_callback_url=values.unset, fallback_assignment_callback_url=values.unset, configuration=values.unset, task_reservation_timeout=values.unset): return self._proxy.update( friendly_name=friendly_name, assignment_callback_url=assignment_callback_url, fallback_assignment_callback_url=fallback_assignment_callback_url, configuration=configuration, task_reservation_timeout=task_reservation_timeout, )
Update the WorkflowInstance :param unicode friendly_name: A string representing a human readable name for this Workflow. :param unicode assignment_callback_url: A valid URL for the application that will process task assignment events. :param unicode fallback_assignment_callback_url: If the request to the AssignmentCallbackUrl fails, the assignment callback will be made to this URL. :param unicode configuration: JSON document configuring the rules for this Workflow. :param unicode task_reservation_timeout: An integer value controlling how long in seconds TaskRouter will wait for a confirmation response from your application after assigning a Task to a worker. :returns: Updated WorkflowInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.WorkflowInstance
def _validate_user_class(cls, user_class):
    # the dedent()ed error-message literals were lost in the source; each one
    # describes the required method that the corresponding check enforces
    PraetorianError.require_condition(
        getattr(user_class, 'lookup', None) is not None,
        textwrap.dedent(''),
    )
    PraetorianError.require_condition(
        getattr(user_class, 'identify', None) is not None,
        textwrap.dedent(''),
    )
    return user_class
Validates the supplied user_class to make sure that it has the class methods necessary to function correctly. Requirements: - ``lookup`` method. Accepts a string parameter, returns instance - ``identify`` method. Accepts an identity parameter, returns instance
def _conv(self, v):
    if isinstance(v, str):
        return '"%s"' % v.replace("'", "''")
    elif isinstance(v, datetime.datetime):
        if v.tzinfo is not None:
            # Python 2 raise syntax, kept as in the original
            raise ValueError, \
                "datetime instances with tzinfo not supported"
        return '"%s"' % self.db_module.Timestamp(v.year, v.month, v.day,
                                                 v.hour, v.minute, v.second)
    elif isinstance(v, datetime.date):
        return '"%s"' % self.db_module.Date(v.year, v.month, v.day)
    else:
        return v
Convert Python values to MySQL values
def update_vip_request(self, vip_request, vip_request_id): uri = 'api/v3/vip-request/%s/' % vip_request_id data = dict() data['vips'] = list() data['vips'].append(vip_request) return super(ApiVipRequest, self).put(uri, data)
Method to update vip request param vip_request: vip_request object param vip_request_id: vip_request id
def parse_compound_table_file(path, f): context = FilePathContext(path) for i, row in enumerate(csv.DictReader(f, delimiter=str('\t'))): if 'id' not in row or row['id'].strip() == '': raise ParseError('Expected `id` column in table') props = {key: value for key, value in iteritems(row) if value != ''} if 'charge' in props: props['charge'] = int(props['charge']) mark = FileMark(context, i + 2, None) yield CompoundEntry(props, mark)
Parse a tab-separated file containing compound IDs and properties The compound properties are parsed according to the header which specifies which property is contained in each column.
def random_shift(image, wsr=0.1, hsr=0.1): height, width, _ = common_layers.shape_list(image) width_range, height_range = wsr*width, hsr*height height_translations = tf.random_uniform((1,), -height_range, height_range) width_translations = tf.random_uniform((1,), -width_range, width_range) translations = tf.concat((height_translations, width_translations), axis=0) return tf.contrib.image.translate(image, translations=translations)
Apply random horizontal and vertical shift to images. This is the default data-augmentation strategy used on CIFAR in Glow. Args: image: a 3-D Tensor wsr: Width shift range, as a float fraction of the width. hsr: Height shift range, as a float fraction of the width. Returns: images: images translated by the provided wsr and hsr.
def assertFileSizeEqual(self, filename, size, msg=None): fsize = self._get_file_size(filename) self.assertEqual(fsize, size, msg=msg)
Fail if ``filename`` does not have the given ``size`` as determined by the '==' operator. Parameters ---------- filename : str, bytes, file-like size : int, float msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``filename`` is not a str or bytes object and is not file-like.
def _compress_json(self, j):
    compressed_json = copy.copy(j)
    compressed_json.pop('users', None)
    compressed_data = zlib.compress(
        json.dumps(j['users']).encode('utf-8'),
        self.zlib_compression_strength
    )
    b64_data = base64.b64encode(compressed_data).decode('utf-8')
    compressed_json['blob'] = b64_data
    return compressed_json
Compress the BLOB data portion of the usernotes. Arguments: j: the JSON in Schema v5 format (dict) Returns a dict with the 'users' key removed and 'blob' key added
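A standalone sketch of the blob encoding above using only the standard library: the 'users' mapping is JSON-encoded, zlib-compressed, then base64-encoded into a 'blob' field (the sample notes dict is made up).

import base64
import json
import zlib

notes = {'ver': 6, 'users': {'alice': {'ns': []}}}
blob = base64.b64encode(
    zlib.compress(json.dumps(notes['users']).encode('utf-8'))
).decode('utf-8')
compact = {k: v for k, v in notes.items() if k != 'users'}
compact['blob'] = blob
print(compact)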
def check_values_selection_field(cr, table_name, field_name, allowed_values): res = True cr.execute("SELECT %s, count(*) FROM %s GROUP BY %s;" % (field_name, table_name, field_name)) for row in cr.fetchall(): if row[0] not in allowed_values: logger.error( "Invalid value '%s' in the table '%s' " "for the field '%s'. (%s rows).", row[0], table_name, field_name, row[1]) res = False return res
check if the field selection 'field_name' of the table 'table_name' has only the values 'allowed_values'. If not return False and log an error. If yes, return True. .. versionadded:: 8.0
def build_command(chunks):
    if not chunks:
        raise ValueError(
            "No command parts: {} ({})".format(chunks, type(chunks)))
    if isinstance(chunks, str):
        return chunks
    parsed_pieces = []
    for cmd_part in chunks:
        if cmd_part is None:
            continue
        try:
            parsed_pieces.append(cmd_part.strip(" "))
        except AttributeError:
            option, argument = cmd_part
            if argument is None or argument == "":
                continue
            option, argument = option.strip(" "), str(argument).strip(" ")
            parsed_pieces.append("{} {}".format(option, argument))
    return " ".join(parsed_pieces)
Create a command from various parts. The parts provided may include a base, flags, option-bound arguments, and positional arguments. Each element must be either a string or a two-tuple. Raw strings are interpreted as either the command base, a pre-joined pair (or multiple pairs) of option and argument, a series of positional arguments, or a combination of those elements. The only modification they undergo is trimming of any space characters from each end. :param Iterable[str | (str, str | NoneType)] chunks: the collection of the command components to interpret, modify, and join to create a single meaningful command :return str: the single meaningful command built from the given components :raise ValueError: if no command parts are provided
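A hypothetical call illustrating the accepted chunk shapes: a base string, option/argument tuples (None or empty arguments are dropped), and a trailing positional argument.

chunks = ["samtools sort", ("-@", 4), ("-m", None), ("-o", "out.bam"), "in.bam"]
print(build_command(chunks))
# samtools sort -@ 4 -o out.bam in.bam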
def post_save_moderation(self, sender, comment, request, **kwargs): model = comment.content_type.model_class() if model not in self._registry: return self._registry[model].email(comment, comment.content_object, request)
Apply any necessary post-save moderation steps to new comments.
def listTargets(self): sql = 'select * from {}'.format(self.TABLE_ITEMS) cursor = self.db.execute(sql) return [(iid, name, path) for iid, name, path in cursor]
Returns a list of all the items secured in the vault
def tokenize(self, config):
    tokens = []
    reg_ex = re.compile(self.TOKENS[0], re.M | re.I)
    for token in re.finditer(reg_ex, config):
        value = token.group(0)
        if token.group("operator"):
            t_type = "operator"
        elif token.group("literal"):
            t_type = "literal"
        elif token.group("newline"):
            t_type = "newline"
        elif token.group("function"):
            t_type = "function"
        elif token.group("unknown"):
            t_type = "unknown"
        else:
            continue
        tokens.append(
            {"type": t_type, "value": value, "match": token, "start": token.start()}
        )
    self.tokens = tokens
Break the config into a series of tokens
def discovery(self, url=None): if url: data = self.session.get(url).content elif self.discovery_url: response = self.session.get(self.discovery_url) if self.format == 'xml': data = xml(response.text) else: data = response.json() else: data = self.get('discovery') return data
Retrieve the standard discovery file that provides routing information. >>> Three().discovery() {'discovery': 'data'}
def motif_from_consensus(cons, n=12):
    width = len(cons)
    nucs = {"A": 0, "C": 1, "G": 2, "T": 3}
    pfm = [[0 for _ in range(4)] for _ in range(width)]
    m = Motif()
    for i, char in enumerate(cons):
        for nuc in m.iupac[char.upper()]:
            pfm[i][nucs[nuc]] = n / len(m.iupac[char.upper()])
    m = Motif(pfm)
    m.id = cons
    return m
Convert consensus sequence to motif. Converts a consensus sequences using the nucleotide IUPAC alphabet to a motif. Parameters ---------- cons : str Consensus sequence using the IUPAC alphabet. n : int , optional Count used to convert the sequence to a PFM. Returns ------- m : Motif instance Motif created from the consensus.
def render_relation(self, r, **args): if r is None: return "." m = self.config.relsymbolmap if r in m: return m[r] return r
Render an object property
def purge_old(self): if self.keep_max is not None: keys = self.redis_conn.keys(self.get_key() + ':*') keys.sort(reverse=True) while len(keys) > self.keep_max: key = keys.pop() self.redis_conn.delete(key)
Removes keys that are beyond our keep_max limit
def msg(self, message, *args, **kwargs):
    target = kwargs.pop('target', None)
    raw = kwargs.pop('raw', False)
    if not target:
        target = self.line.sender.nick if self.line.pm else self.line.target
    if not raw:
        kw = {
            'm': self,
            'b': chr(2),
            'c': chr(3),
            'u': chr(31),
        }
        kw.update(kwargs)
        try:
            message = message.format(*args, **kw)
        except IndexError:
            if len(args) == 1 and isinstance(args[0], list):
                message = message.format(*args[0], **kw)
            else:
                raise
    self.connection.msg(target, message)
Shortcut to send a message through the connection. This function sends the input message through the connection. A target can be defined, else it will send it to the channel or user from the input Line, effectively responding on whatever triggered the command which calls this function to be called. If raw has not been set to True, formatting will be applied using the standard Python Formatting Mini-Language, using the additional given args and kwargs, along with some additional kwargs, such as the match object to easily access Regex matches, color codes and other things. http://docs.python.org/3.3/library/string.html#format-string-syntax
def configure(access_key=None, secret_key=None, logger=None): if not logger: logger = log.get_logger('s3') if not all([access_key, secret_key]): logger.info('') access_key = input('AWS Access Key: ') secret_key = input('AWS Secret Key: ') _write_config(access_key, secret_key) logger.info('') logger.info('Completed writing S3 config file.') logger.info('')
Configures s3cmd prior to first use. If no arguments are provided, you will be prompted to enter the access key and secret key interactively. Args: access_key (str): AWS access key secret_key (str): AWS secret key
def _extract_asset_urls(self, asset_ids): dom = get_page(self._session, OPENCOURSE_ASSET_URL, json=True, ids=quote_plus(','.join(asset_ids))) return [{'id': element['id'], 'url': element['url'].strip()} for element in dom['elements']]
Extract asset URLs along with asset ids. @param asset_ids: List of ids to get URLs for. @type asset_ids: [str] @return: List of dictionaries with asset URLs and ids. @rtype: [{ 'id': '<id>', 'url': '<url>' }]
def getLayerName(url): urlInfo = None urlSplit = None try: urlInfo = urlparse.urlparse(url) urlSplit = str(urlInfo.path).split('/') name = urlSplit[len(urlSplit)-3] return name except: return url finally: urlInfo = None urlSplit = None del urlInfo del urlSplit gc.collect()
Extract the layer name from a url. Args: url (str): The url to parse. Returns: str: The layer name. Examples: >>> url = "http://services.arcgis.com/<random>/arcgis/rest/services/test/FeatureServer/12" >>> arcresthelper.common.getLayerName(url) 'test'
def get_all_tep(self): teps = {} for p in self.get_enabled_plugins: for e, v in p["plugin_tep"].items(): tep = teps.get(e, dict()) tepHF = tep.get("HTMLFile", []) tepHS = tep.get("HTMLString", []) tepHF += [s for f, s in v.items() if f == "HTMLFile"] tepHS += [s for f, s in v.items() if f == "HTMLString"] teps[e] = dict(HTMLFile=tepHF, HTMLString=tepHS) return teps
Template extension point :returns: dict: {tep: dict(HTMLFile=[], HTMLString=[]), tep...}
def updateMetadata(self, new): if self.node_id != new.node_id: raise ValueError("Broker metadata {!r} doesn't match node_id={}".format(new, self.node_id)) self.node_id = new.node_id self.host = new.host self.port = new.port
Update the metadata stored for this broker. Future connections made to the broker will use the host and port defined in the new metadata. Any existing connection is not dropped, however. :param new: :class:`afkak.common.BrokerMetadata` with the same node ID as the current metadata.
def disable_alarm_actions(self, alarm_names): params = {} self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') return self.get_status('DisableAlarmActions', params)
Disables actions for the specified alarms. :type alarms: list :param alarms: List of alarm names.
def sfs_folded(ac, n=None):
    ac, n = _check_ac_n(ac, n)
    mac = np.amin(ac, axis=1)
    mac = mac.astype(int, copy=False)
    x = n // 2 + 1
    s = np.bincount(mac, minlength=x)
    return s
Compute the folded site frequency spectrum given reference and alternate allele counts at a set of biallelic variants. Parameters ---------- ac : array_like, int, shape (n_variants, 2) Allele counts array. n : int, optional The total number of chromosomes called. Returns ------- sfs_folded : ndarray, int, shape (n_chromosomes//2,) Array where the kth element is the number of variant sites with a minor allele count of k.
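A worked example of the folding logic above with made-up counts (assuming n is the total number of chromosomes, 4 here): minor allele counts are binned into n//2 + 1 classes.

import numpy as np

ac = np.array([[3, 1], [2, 2], [4, 0], [1, 3]])  # 4 variants, n = 4
mac = np.amin(ac, axis=1)                        # minor allele counts: [1, 2, 0, 1]
print(np.bincount(mac, minlength=4 // 2 + 1))    # [1 2 1]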
def pack_block(self, block: BaseBlock, *args: Any, **kwargs: Any) -> BaseBlock:
    if 'uncles' in kwargs:
        uncles = kwargs.pop('uncles')
        kwargs.setdefault('uncles_hash', keccak(rlp.encode(uncles)))
    else:
        uncles = block.uncles
    provided_fields = set(kwargs.keys())
    known_fields = set(BlockHeader._meta.field_names)
    unknown_fields = provided_fields.difference(known_fields)
    if unknown_fields:
        raise AttributeError(
            "Unable to set the field(s) {0} on the `BlockHeader` class. "
            "Received the following unexpected fields: {1}.".format(
                ", ".join(known_fields),
                ", ".join(unknown_fields),
            )
        )
    header = block.header.copy(**kwargs)
    packed_block = block.copy(uncles=uncles, header=header)
    return packed_block
Pack block for mining. :param bytes coinbase: 20-byte public address to receive block reward :param bytes uncles_hash: 32 bytes :param bytes state_root: 32 bytes :param bytes transaction_root: 32 bytes :param bytes receipt_root: 32 bytes :param int bloom: :param int gas_used: :param bytes extra_data: 32 bytes :param bytes mix_hash: 32 bytes :param bytes nonce: 8 bytes
def queuedb_findall(path, queue_id, name=None, offset=None, limit=None):
    # build the WHERE clause before ORDER BY; the flattened original appended
    # the name filter after ORDER BY, which is not valid SQL
    sql = "SELECT * FROM queue WHERE queue_id = ?"
    args = (queue_id,)
    if name:
        sql += ' AND name = ?'
        args += (name,)
    sql += ' ORDER BY rowid ASC'
    if limit:
        sql += ' LIMIT ?'
        args += (limit,)
    if offset:
        sql += ' OFFSET ?'
        args += (offset,)
    sql += ';'
    db = queuedb_open(path)
    if db is None:
        raise Exception("Failed to open %s" % path)
    cur = db.cursor()
    rows = queuedb_query_execute(cur, sql, args)
    ret = []
    for row in rows:
        dat = {}
        dat.update(row)
        ret.append(dat)
    db.close()
    return ret
Get all queued entries for a queue and a name. If name is None, then find all queue entries Return the rows on success (empty list if not found) Raise on error
def cleanup(self): if self.data.hooks and len(self.data.hooks.cleanup) > 0: env = self.data.env_list[0].copy() env.update({'PIPELINE_RESULT': 'SUCCESS', 'PIPELINE_SHELL_EXIT_CODE': '0'}) config = ShellConfig(script=self.data.hooks.cleanup, model=self.model, env=env, dry_run=self.options.dry_run, debug=self.options.debug, strict=self.options.strict, temporary_scripts_path=self.options.temporary_scripts_path) cleanup_shell = Bash(config) for line in cleanup_shell.process(): yield line
Run cleanup script of pipeline when hook is configured.