def parent(self):
    parent_id = self._json_data.get('parent_id')
    if parent_id is None:
        raise NotFoundError("Cannot find subprocess for this task '{}', "
                            "as this task exists on top level.".format(self.name))
    return self._client.activity(pk=parent_id, scope=self.scope_id)

Retrieve the parent in which this activity is defined.

If this is a task on top level, it raises NotFoundError.

:return: a :class:`Activity2`
:raises NotFoundError: when it is a task in the top level of a project
:raises APIError: when another error occurs

Example
-------
>>> task = project.activity('Subtask')
>>> parent_of_task = task.parent()
def comparator_eval(comparator_params):
    top1, top2, params1, params2, seq1, seq2, movements = comparator_params
    xrot, yrot, zrot, xtrans, ytrans, ztrans = movements
    obj1 = top1(*params1)
    obj2 = top2(*params2)
    obj2.rotate(xrot, [1, 0, 0])
    obj2.rotate(yrot, [0, 1, 0])
    obj2.rotate(zrot, [0, 0, 1])
    obj2.translate([xtrans, ytrans, ztrans])
    model = obj1 + obj2
    model.relabel_all()
    model.pack_new_sequences(seq1 + seq2)
    return model.buff_interaction_energy.total_energy
Gets BUFF score for interaction between two AMPAL objects
def version(self):
    res = self.client.service.Version()
    return '.'.join([ustr(x) for x in res[0]])
Return version of the TR DWE.
def build_gtapp(appname, dry_run, **kwargs):
    pfiles_orig = _set_pfiles(dry_run, **kwargs)
    gtapp = GtApp.GtApp(appname)
    update_gtapp(gtapp, **kwargs)
    _reset_pfiles(pfiles_orig)
    return gtapp
Build an object that can run ScienceTools application Parameters ---------- appname : str Name of the application (e.g., gtbin) dry_run : bool Print command but do not run it kwargs : arguments used to invoke the application Returns `GtApp.GtApp` object that will run the application in question
def fetch(cls, channel, start, end, bits=None, host=None, port=None,
          verbose=False, connection=None, type=Nds2ChannelType.any()):
    new = cls.DictClass.fetch(
        [channel], start, end, host=host, port=port, verbose=verbose,
        connection=connection)[channel]
    if bits:
        new.bits = bits
    return new
Fetch data from NDS into a `StateVector`. Parameters ---------- channel : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS start time of required data, any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS end time of required data, any input parseable by `~gwpy.time.to_gps` is fine bits : `Bits`, `list`, optional definition of bits for this `StateVector` host : `str`, optional URL of NDS server to use, defaults to observatory site host port : `int`, optional port number for NDS server query, must be given with `host` verify : `bool`, optional, default: `True` check channels exist in database before asking for data connection : `nds2.connection` open NDS connection to use verbose : `bool`, optional print verbose output about NDS progress type : `int`, optional NDS2 channel type integer dtype : `type`, `numpy.dtype`, `str`, optional identifier for desired output data type
def factorset_divide(factorset1, factorset2):
    if not isinstance(factorset1, FactorSet) or not isinstance(factorset2, FactorSet):
        raise TypeError("factorset1 and factorset2 must be FactorSet instances")
    return factorset1.divide(factorset2, inplace=False)

r""" Base method for dividing two factor sets.

Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` basically translates to
the union of all the factors present in :math:`\vec\phi_1` and :math:`\frac{1}{\phi_i}` of
all the factors present in :math:`\vec\phi_2`.

Parameters
----------
factorset1: FactorSet
    The dividend
factorset2: FactorSet
    The divisor

Returns
-------
The division of factorset1 and factorset2

Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factorset_divide
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factorset_divide(factor_set2, factor_set1)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f119ad78f90>,
     <DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f119ad78e50>,
     <DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f119ad78ed0>,
     <DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f119ad78e90>])
def readInput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
    self.project_directory = directory
    with tmp_chdir(directory):
        session.add(self)
        self.read(directory, projectFileName, session, spatial, spatialReferenceID)
        if spatialReferenceID is None:
            spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
        replaceParamFile = self._readReplacementFiles(directory, session, spatial, spatialReferenceID)
        self._readXput(self.INPUT_FILES, directory, session, spatial=spatial,
                       spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
        self._readXputMaps(self.INPUT_MAPS, directory, session, spatial=spatial,
                           spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
        self._commit(session, self.COMMIT_ERROR_MESSAGE)
Read only input files for a GSSHA project into the database. Use this method to read a project when only pre-processing tasks need to be performed. Args: directory (str): Directory containing all GSSHA model files. This method assumes that all files are located in the same directory. projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj'). session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects. Defaults to False. spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails, default srid will be used (4326 for WGS 84).
def del_token(token):
    token_path = os.path.join(__opts__['token_dir'], token)
    if os.path.exists(token_path):
        return os.remove(token_path) is None
    return False
Delete an eauth token by name CLI Example: .. code-block:: shell salt-run auth.del_token 6556760736e4077daa601baec2b67c24
def list_nodes_full(call=None):
    response = _query('grid', 'server/list')
    ret = {}
    for item in response['list']:
        name = item['name']
        ret[name] = item
        ret[name]['image_info'] = item['image']
        ret[name]['image'] = item['image']['friendlyName']
        ret[name]['size'] = item['ram']['name']
        ret[name]['public_ips'] = [item['ip']['ip']]
        ret[name]['private_ips'] = []
        ret[name]['state_info'] = item['state']
        if 'active' in item['state']['description']:
            ret[name]['state'] = 'RUNNING'
    return ret
List nodes, with all available information CLI Example: .. code-block:: bash salt-cloud -F
def configure_host_cache(enabled, datastore=None, swap_size_MiB=None, service_instance=None):
    log.debug('Validating host cache input')
    schema = SimpleHostCacheSchema.serialize()
    try:
        jsonschema.validate({'enabled': enabled,
                             'datastore_name': datastore,
                             'swap_size_MiB': swap_size_MiB},
                            schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise ArgumentValueError(exc)
    if not enabled:
        raise ArgumentValueError('Disabling the host cache is not supported')
    ret_dict = {'enabled': False}
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    if datastore:
        ds_refs = salt.utils.vmware.get_datastores(
            service_instance, host_ref, datastore_names=[datastore])
        if not ds_refs:
            raise VMwareObjectRetrievalError(
                'Datastore \'{0}\' was not found on host '
                '\'{1}\''.format(datastore, hostname))
        ds_ref = ds_refs[0]
    salt.utils.vmware.configure_host_cache(host_ref, ds_ref, swap_size_MiB)
    return True
Configures the host cache on the selected host.

enabled
    Boolean flag specifying whether the host cache is enabled.

datastore
    Name of the datastore that contains the host cache. Must be set if enabled is ``true``.

swap_size_MiB
    Swap size in mebibytes. Needs to be set if enabled is ``true``. Must be smaller
    than the datastore size.

service_instance
    Service instance (vim.ServiceInstance) of the vCenter/ESXi host. Default is None.

.. code-block:: bash

    salt '*' vsphere.configure_host_cache enabled=False

    salt '*' vsphere.configure_host_cache enabled=True datastore=ds1 swap_size_MiB=1024
def log_coroutine(self, cor, *args, **kwargs):
    if self.stopping:
        raise LoopStoppingError("Could not launch coroutine because loop is shutting down: %s" % cor)
    self.start()
    cor = _instaniate_coroutine(cor, args, kwargs)

    def _run_and_log():
        task = self.loop.create_task(cor)
        task.add_done_callback(lambda x: _log_future_exception(x, self._logger))

    if self.inside_loop():
        _run_and_log()
    else:
        self.loop.call_soon_threadsafe(_run_and_log)
Run a coroutine logging any exception raised. This routine will not block until the coroutine is finished nor will it return any result. It will just log if any exception is raised by the coroutine during operation. It is safe to call from both inside and outside the event loop. There is no guarantee on how soon the coroutine will be scheduled. Args: cor (coroutine): The coroutine that we wish to run in the background and wait until it finishes.
def abs(cls, x: 'TensorFluent') -> 'TensorFluent':
    return cls._unary_op(x, tf.abs, tf.float32)
Returns a TensorFluent for the abs function. Args: x: The input fluent. Returns: A TensorFluent wrapping the abs function.
def save_hex(hex_file, path):
    if not hex_file:
        raise ValueError('Cannot flash an empty .hex file.')
    if not path.endswith('.hex'):
        raise ValueError('The path to flash must be for a .hex file.')
    with open(path, 'wb') as output:
        output.write(hex_file.encode('ascii'))
Given a string representation of a hex file, this function copies it to the specified path thus causing the device mounted at that point to be flashed. If the hex_file is empty it will raise a ValueError. If the filename at the end of the path does not end in '.hex' it will raise a ValueError.
def partial_path_match(path1, path2, kwarg_re=r'\{.*\}'):
    split_p1 = path1.split('/')
    split_p2 = path2.split('/')
    pat = re.compile(kwarg_re)
    if len(split_p1) != len(split_p2):
        return False
    for partial_p1, partial_p2 in zip(split_p1, split_p2):
        if pat.match(partial_p1) or pat.match(partial_p2):
            continue
        if not partial_p1 == partial_p2:
            return False
    return True
Validates if path1 and path2 matches, ignoring any kwargs in the string. We need this to ensure we can match Swagger patterns like: /foo/{id} against the observed pyramid path /foo/1 :param path1: path of a url :type path1: string :param path2: path of a url :type path2: string :param kwarg_re: regex pattern to identify kwargs :type kwarg_re: regex string :returns: boolean
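A minimal usage sketch of the matcher above (assuming the function and the `re` module are in scope as in its source module):

# A '{...}' segment is treated as a kwarg and matches any value in the same position.
print(partial_path_match('/foo/{id}', '/foo/1'))       # True
print(partial_path_match('/foo/{id}', '/bar/1'))       # False: literal segments differ
print(partial_path_match('/foo/{id}', '/foo/1/edit'))  # False: different number of segments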
def _group_by_equal_size(obj_list, tot_groups, threshold=pow(2, 32)):
    sorted_obj_list = sorted([(obj['size'], obj) for obj in obj_list], reverse=True)
    groups = [(random.random(), []) for _ in range(tot_groups)]
    if tot_groups <= 1:
        groups = _group_by_size_greedy(obj_list, tot_groups)
        return groups
    heapq.heapify(groups)
    for obj in sorted_obj_list:
        if obj[0] > threshold:
            heapq.heappush(groups, (obj[0], [obj[1]]))
        else:
            size, files = heapq.heappop(groups)
            size += obj[0]
            files.append(obj[1])
            heapq.heappush(groups, (size, files))
    groups = [group[1] for group in groups]
    return groups
Partition a list of objects evenly and by file size Files are placed according to largest file in the smallest bucket. If the file is larger than the given threshold, then it is placed in a new bucket by itself. :param obj_list: a list of dict-like objects with a 'size' property :param tot_groups: number of partitions to split the data :param threshold: the maximum size of each bucket :return: a list of lists, one for each partition
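A small illustrative call, hedged as a sketch since the order of the returned groups depends on the internal heap (the object dicts here are made up for the example):

buckets = _group_by_equal_size([{'size': 5}, {'size': 3}, {'size': 2}], tot_groups=2)
# One possible result (group order may vary):
# [[{'size': 5}], [{'size': 3}, {'size': 2}]]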
def notUnique(iterable, reportMax=INF):
    hash = {}
    n = 0
    if reportMax < 1:
        raise ValueError("`reportMax` must be >= 1 and is %r" % reportMax)
    for item in iterable:
        count = hash[item] = hash.get(item, 0) + 1
        if count > 1:
            yield item
            n += 1
            if n >= reportMax:
                return
Returns the elements in `iterable` that aren't unique; stops after it found
`reportMax` non-unique elements.

Examples:

>>> list(notUnique([1,1,2,2,3,3]))
[1, 2, 3]
>>> list(notUnique([1,1,2,2,3,3], 1))
[1]
def locate(self, path):
    return Zconfig(lib.zconfig_locate(self._as_parameter_, path), False)
Find a config item along a path; leading slash is optional and ignored.
def wait(objects, count=None, timeout=None):
    for obj in objects:
        if not hasattr(obj, 'add_done_callback'):
            raise TypeError('Expecting sequence of waitable objects')
    if count is None:
        count = len(objects)
    if count < 0 or count > len(objects):
        raise ValueError('count must be between 0 and len(objects)')
    if count == 0:
        return [], objects
    pending = list(objects)
    done = []
    try:
        for obj in _wait(pending, timeout):
            done.append(obj)
            if len(done) == count:
                break
    except Timeout:
        pass
    return done, list(filter(bool, pending))

Wait for one or more waitable objects.

This method waits until *count* elements from the sequence of waitable objects
*objects* have become ready. If *count* is ``None`` (the default), then wait for all
objects to become ready.

What "ready" means depends on the object type. A waitable object is an object that
implements the ``add_done_callback()`` and ``remove_done_callback`` methods. This
currently includes:

* :class:`~gruvi.Event` - an event is ready when its internal flag is set.
* :class:`~gruvi.Future` - a future is ready when its result is set.
* :class:`~gruvi.Fiber` - a fiber is ready when it has terminated.
* :class:`~gruvi.Process` - a process is ready when the child has exited.
def boolean(flag):
    s = flag.lower()
    if s in ('1', 'yes', 'true'):
        return True
    elif s in ('0', 'no', 'false'):
        return False
    raise ValueError('Unknown flag %r' % s)

Convert a string flag to a boolean.
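For example, assuming the function above is in scope:

assert boolean('Yes') is True    # case-insensitive match against ('1', 'yes', 'true')
assert boolean('0') is False
# boolean('maybe') raises ValueError("Unknown flag 'maybe'")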
def GetEntries(self, parser_mediator, top_level=None, **unused_kwargs):
    for entry in top_level:
        datetime_value = entry.get('date', None)
        package_identifiers = entry.get('packageIdentifiers', [])
        if not datetime_value or not package_identifiers:
            continue
        display_name = entry.get('displayName', '<UNKNOWN>')
        display_version = entry.get('displayVersion', '<DISPLAY_VERSION>')
        process_name = entry.get('processName', '<PROCESS_NAME>')
        package_identifiers = ', '.join(package_identifiers)
        event_data = plist_event.PlistTimeEventData()
        event_data.desc = (
            'Installation of [{0:s} {1:s}] using [{2:s}]. Packages: '
            '{3:s}.').format(
                display_name, display_version, process_name, package_identifiers)
        event_data.key = ''
        event_data.root = '/item'
        event = time_events.PythonDatetimeEvent(
            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts relevant install history entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. top_level (dict[str, object]): plist top-level key.
def start(self):
    while True:
        self.thread_debug("Interval starting")
        for thr in threading.enumerate():
            self.thread_debug(" " + str(thr))
        self.feed_monitors()
        start = time.time()
        self.workers_queue.join()
        end = time.time()
        diff = self.config['interval']['test'] - (end - start)
        if diff <= 0:
            self.stats.procwin = -diff
            self.thread_debug("Cannot keep up with tests! {} seconds late"
                              .format(abs(diff)))
        else:
            self.thread_debug("waiting {} seconds...".format(diff))
            time.sleep(diff)
The main loop, run forever.
def process_post_media_attachment(self, bulk_mode, api_media_attachment):
    attachment = None
    if bulk_mode:
        attachment = self.ref_data_map["media"].get(api_media_attachment["ID"])
    if not attachment:
        attachment, created = self.get_or_create_media(api_media_attachment)
        if attachment and not created:
            self.update_existing_media(attachment, api_media_attachment)
    if attachment:
        self.ref_data_map["media"][api_media_attachment["ID"]] = attachment
    return attachment
Create or update a Media attached to a post. :param bulk_mode: If True, minimize db operations by bulk creating post objects :param api_media_attachment: the API data for the Media :return: the Media attachment object
def __parse_namespace(self):
    if self.manifest.has_option('config', 'namespace'):
        return self.manifest.get('config', 'namespace')
    elif self.manifest.has_option('config', 'source'):
        return NAMESPACE_REGEX.search(self.manifest.get('config', 'source')).groups()[0]
    else:
        logger.warn('Could not parse namespace implicitly')
        return None
Parse the namespace from various sources
def gen_textfiles_from_filenames(
        filenames: Iterable[str]) -> Generator[TextIO, None, None]:
    for filename in filenames:
        with open(filename) as f:
            yield f
Generates file-like objects from a list of filenames. Args: filenames: iterable of filenames Yields: each file as a :class:`TextIO` object
def ystep(self):
    amidx = self.index_addmsk()
    Yi = self.cbpdn.AX[amidx] + self.cbpdn.U[amidx]
    self.inner_ystep()
    Yi[np.where(self.W.astype(np.bool))] = 0.0
    self.cbpdn.Y[amidx] = Yi
This method is inserted into the inner cbpdn object, replacing its own ystep method, thereby providing a hook for applying the additional steps necessary for the AMS method.
def _list_getter(self):
    def get_child_element_list(obj):
        return obj.findall(qn(self._nsptagname))
    get_child_element_list.__doc__ = (
        'A list containing each of the ``<%s>`` child elements, in the o'
        'rder they appear.' % self._nsptagname
    )
    return get_child_element_list
Return a function object suitable for the "get" side of a list property descriptor.
def local(self):
    assert self.name in CFG["container"]["images"].value
    tmp_dir = local.path(str(CFG["tmp_dir"]))
    target_dir = tmp_dir / self.name
    if not target_dir.exists() or not is_valid(self, target_dir):
        unpack(self, target_dir)
    return target_dir
Finds the current location of a container. Also unpacks the project if necessary. Returns: target: The path, where the container lies in the end.
def __get_host(node, vm_):
    if __get_ssh_interface(vm_) == 'private_ips' or vm_['external_ip'] is None:
        ip_address = node.private_ips[0]
        log.info('Salt node data. Private_ip: %s', ip_address)
    else:
        ip_address = node.public_ips[0]
        log.info('Salt node data. Public_ip: %s', ip_address)
    if ip_address:
        return ip_address
    return node.name
Return public IP, private IP, or hostname for the libcloud 'node' object
def get_gene_name(cls, entry):
    gene_name = entry.find("./gene/name[@type='primary']")
    return gene_name.text if gene_name is not None and gene_name.text.strip() else None
get primary gene name from XML node entry :param entry: XML node entry :return: str
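A quick sketch of the XPath lookup this performs, using a hand-made ElementTree node (the surrounding class is assumed to expose this as a classmethod):

from xml.etree import ElementTree as ET

entry = ET.fromstring('<entry><gene><name type="primary">TP53</name></gene></entry>')
# entry.find("./gene/name[@type='primary']").text -> 'TP53'
# so the method would return 'TP53' for this node, and None if the name were absent or blank.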
def _find_most_recent_backup(normal_path: Optional[str]) -> Optional[str]:
    if normal_path is None:
        return None
    if os.path.exists(normal_path):
        return normal_path
    dirname, basename = os.path.split(normal_path)
    root, ext = os.path.splitext(basename)
    backups = [fi for fi in os.listdir(dirname)
               if fi.startswith(root) and fi.endswith(ext)]
    ts_re = re.compile(r'.*-([0-9]+)' + ext + '$')

    def ts_compare(filename):
        match = ts_re.match(filename)
        if not match:
            return -1
        else:
            return int(match.group(1))

    backups_sorted = sorted(backups, key=ts_compare)
    if not backups_sorted:
        return None
    return os.path.join(dirname, backups_sorted[-1])
Find the most recent old settings to migrate. The input is the path to an unqualified settings file - e.g. /mnt/usbdrive/config/robotSettings.json This will return - None if the input is None (to support chaining from dict.get()) - The input if it exists, or - The file named normal_path-TIMESTAMP.json with the highest timestamp if one can be found, or - None
def get_api_version(base_url, api_version=None, timeout=10, verify=True):
    versions = available_api_versions(base_url, timeout, verify)
    newest_version = max([float(i) for i in versions])
    if api_version is None:
        api_version = newest_version
    else:
        if api_version not in versions:
            api_version = newest_version
    return api_version
Get the API version specified or resolve the latest version :return api version :rtype: float
def charm_icon_url(self, charm_id, channel=None):
    url = '{}/{}/icon.svg'.format(self.url, _get_path(charm_id))
    return _add_channel(url, channel)
Generate the path to the icon for charms. @param charm_id The ID of the charm. @param channel Optional channel name. @return The url to the icon.
def key_rule(self, regex, verifier):
    if regex is not None:
        regex = re.compile(regex)
    self._additional_key_rules.append((regex, verifier))
Add a rule with a pattern that should apply to all keys. Any key not explicitly listed in an add_required or add_optional rule must match ONE OF the rules given in a call to key_rule(). So these rules are all OR'ed together. In this case you should pass a raw string specifying a regex that is used to determine if the rule is used to check a given key. Args: regex (str): The regular expression used to match the rule or None if this should apply to all verifier (Verifier): The verification rule
def print_coordinates(atoms, V, title=""):
    print(set_coordinates(atoms, V, title=title))
    return
Print coordinates V with corresponding atoms to stdout in XYZ format. Parameters ---------- atoms : list List of element types V : array (N,3) matrix of atomic coordinates title : string (optional) Title of molecule
def penalty_satisfaction(response, bqm):
    record = response.record
    label_dict = response.variables.index
    if len(bqm.info['reduction']) == 0:
        return np.array([1] * len(record.sample))
    penalty_vector = np.prod([record.sample[:, label_dict[qi]] *
                              record.sample[:, label_dict[qj]] ==
                              record.sample[:, label_dict[valdict['product']]]
                              for (qi, qj), valdict in bqm.info['reduction'].items()],
                             axis=0)
    return penalty_vector
Creates a penalty satisfaction list Given a sampleSet and a bqm object, will create a binary list informing whether the penalties introduced during degree reduction are satisfied for each sample in sampleSet Args: response (:obj:`.SampleSet`): Samples corresponding to provided bqm bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains its reduction info. Returns: :obj:`numpy.ndarray`: a binary array of penalty satisfaction information
def get_logger(name=None):
    logger = logging.getLogger(name)
    if len(logger.handlers) == 0:
        logger = add_stream_handler(logger)
    return logger
Get a logging handle. As with ``setup_logging``, a stream handler is added to the log handle. Arguments: name (str): Name of the log handle. Default is ``None``.
def write_alignment(self, filename, file_format, interleaved=None):
    if file_format == 'phylip':
        file_format = 'phylip-relaxed'
    AlignIO.write(self._msa, filename, file_format)
Write the alignment to file using Bio.AlignIO
def diff_list(self, list1, list2):
    for key in list1:
        if key in list2 and list2[key] != list1[key]:
            print key
        elif key not in list2:
            print key
Extracts differences between lists. For debug purposes
def astuple(self, encoding=None):
    if not encoding:
        return (
            self.id, self.seqid, self.source, self.featuretype, self.start,
            self.end, self.score, self.strand, self.frame,
            helpers._jsonify(self.attributes),
            helpers._jsonify(self.extra), self.calc_bin()
        )
    return (
        self.id.decode(encoding), self.seqid.decode(encoding),
        self.source.decode(encoding), self.featuretype.decode(encoding),
        self.start, self.end, self.score.decode(encoding),
        self.strand.decode(encoding), self.frame.decode(encoding),
        helpers._jsonify(self.attributes).decode(encoding),
        helpers._jsonify(self.extra).decode(encoding),
        self.calc_bin()
    )
Return a tuple suitable for import into a database. Attributes field and extra field jsonified into strings. The order of fields is such that they can be supplied as arguments for the query defined in :attr:`gffutils.constants._INSERT`. If `encoding` is not None, then convert string fields to unicode using the provided encoding. Returns ------- Tuple
def find_by_project(self, project, params={}, **options):
    path = "/projects/%s/sections" % (project)
    return self.client.get(path, params, **options)
Returns the compact records for all sections in the specified project. Parameters ---------- project : {Id} The project to get sections from. [params] : {Object} Parameters for the request
def Parse(self, raw_data):
    self.results = raw_data
    for f in self.filters:
        self.results = f.Parse(self.results)
    return self.results
Take the results and yield results that passed through the filters. The output of each filter is used as the input for successive filters. Args: raw_data: An iterable series of rdf values. Returns: A list of rdf values that matched all filters.
def join_paths(fnames: FilePathList, path: PathOrStr = '.') -> Collection[Path]:
    "Join `path` to every file name in `fnames`."
    path = Path(path)
    return [join_path(o, path) for o in fnames]
Join `path` to every file name in `fnames`.
def _determine_rotated_logfile(self):
    rotated_filename = self._check_rotated_filename_candidates()
    if rotated_filename and exists(rotated_filename):
        if stat(rotated_filename).st_ino == self._offset_file_inode:
            return rotated_filename
        if stat(self.filename).st_ino == self._offset_file_inode:
            if self.copytruncate:
                return rotated_filename
            else:
                sys.stderr.write(
                    "[pygtail] [WARN] file size of %s shrank, and copytruncate support is "
                    "disabled (expected at least %d bytes, was %d bytes).\n" %
                    (self.filename, self._offset, stat(self.filename).st_size))
    return None
We suspect the logfile has been rotated, so try to guess what the rotated filename is, and return it.
def _get_build_command(self, mkdocs_site_path: Path) -> str:
    components = [self._mkdocs_config.get('mkdocs_path', 'mkdocs')]
    components.append('build')
    components.append(f'-d "{self._escape_control_characters(str(mkdocs_site_path))}"')
    command = ' '.join(components)
    self.logger.debug(f'Build command: {command}')
    return command
Generate ``mkdocs build`` command to build the site. :param mkdocs_site_path: Path to the output directory for the site
def _ReloadArtifacts(self):
    self._artifacts = {}
    self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
    self.ReloadDatastoreArtifacts()
Load artifacts from all sources.
def run_samtools(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
    return shared_variantcall(_call_variants_samtools, "samtools", align_bams,
                              ref_file, items, assoc_files, region, out_file)
Detect SNPs and indels with samtools mpileup and bcftools.
def does_schema_exist(self, connection):
    if '.' in self.table:
        query = ("select 1 as schema_exists "
                 "from pg_namespace "
                 "where nspname = lower(%s) limit 1")
    else:
        return True
    cursor = connection.cursor()
    try:
        schema = self.table.split('.')[0]
        cursor.execute(query, [schema])
        result = cursor.fetchone()
        return bool(result)
    finally:
        cursor.close()
Determine whether the schema already exists.
def distribute(self, f, n):
    if self.pool is None:
        return [f(i) for i in range(n)]
    else:
        return self.pool.map(f, range(n))
Distribute the computations amongst the multiprocessing pools Parameters ---------- f : function Function to be distributed to the processors n : int The values in range(0,n) will be passed as arguments to the function f.
def restore_default(self, index):
    spec = self.get_configspec_str(index)
    if spec is None or isinstance(spec, Section):
        return
    try:
        default = self._vld.get_default_value(spec)
        defaultstr = self._val_to_str(default)
        self.setData(index, defaultstr)
    except KeyError:
        raise ConfigError("Missing Default Value in spec: \"%s\"" % spec)
Set the value of the given index row to its default :param index: :type index: :returns: :rtype: :raises:
def get_resource_attribute(resource_attr_id, **kwargs):
    resource_attr_qry = db.DBSession.query(ResourceAttr).filter(
        ResourceAttr.id == resource_attr_id,
    )
    resource_attr = resource_attr_qry.first()
    if resource_attr is None:
        raise ResourceNotFoundError("Resource attribute %s does not exist", resource_attr_id)
    return resource_attr

Get a specific resource attribute, by ID. If type_id is specified, only return the resource attributes within the type.
def bool_env(key, default=False):
    try:
        return os.environ[key].lower() in TRUE
    except KeyError:
        return default
Parse an environment variable as a boolean switch

`True` is returned if the variable value matches one of the following:

- ``'1'``
- ``'y'``
- ``'yes'``
- ``'true'``

The match is case-insensitive (so ``'Yes'`` will match as `True`)

Parameters
----------
key : `str`
    the name of the environment variable to find
default : `bool`
    the default return value if the key is not found

Returns
-------
True if the environment variable matches as 'yes' or similar
False otherwise

Examples
--------
>>> import os
>>> from gwpy.utils.env import bool_env
>>> os.environ['GWPY_VALUE'] = 'yes'
>>> print(bool_env('GWPY_VALUE'))
True
>>> os.environ['GWPY_VALUE'] = 'something else'
>>> print(bool_env('GWPY_VALUE'))
False
>>> print(bool_env('GWPY_VALUE2'))
False
def reana_ready():
    from reana_commons.config import REANA_READY_CONDITIONS
    for module_name, condition_list in REANA_READY_CONDITIONS.items():
        for condition_name in condition_list:
            module = importlib.import_module(module_name)
            condition_func = getattr(module, condition_name)
            if not condition_func():
                return False
    return True
Check if reana can start new workflows.
def pcc_pos(self, row1, row2):
    mean1 = np.mean(row1)
    mean2 = np.mean(row2)
    a = 0
    x = 0
    y = 0
    for n1, n2 in zip(row1, row2):
        a += (n1 - mean1) * (n2 - mean2)
        x += (n1 - mean1) ** 2
        y += (n2 - mean2) ** 2
    if a == 0:
        return 0
    else:
        return a / sqrt(x * y)
Calculate the Pearson correlation coefficient of one position compared to another position. Returns ------- score : float Pearson correlation coefficient.
def _rndLetterTransform(self, image):
    w, h = image.size
    dx = w * random.uniform(0.2, 0.7)
    dy = h * random.uniform(0.2, 0.7)
    x1, y1 = self.__class__._rndPointDisposition(dx, dy)
    x2, y2 = self.__class__._rndPointDisposition(dx, dy)
    w += abs(x1) + abs(x2)
    h += abs(x1) + abs(x2)
    quad = self.__class__._quadPoints((w, h), (x1, y1), (x2, y2))
    return image.transform(image.size, Image.QUAD, data=quad, resample=self.resample)
Randomly morph a single character.
def build(self, ignore=None):
    self._prepare_workspace()
    self.install_dependencies()
    self.package(ignore)
Calls all necessary methods to build the Lambda Package
def parse(self, string, strict=True):
    if isinstance(string, bytes):
        errors = 'strict' if strict else 'replace'
        string = string.decode(self.encoding, errors=errors)
    if not self.raw:
        self.raw = string
    else:
        self.raw += string
    lines = unfold_lines(string).splitlines()
    for line in lines:
        if line:
            if ':' not in line:
                if strict:
                    raise ValueError('Field missing colon.')
                else:
                    continue
            name, value = line.split(':', 1)
            name = name.strip()
            value = value.strip()
            self.add(name, value)
Parse the string or bytes. Args: strict (bool): If True, errors will not be ignored Raises: :class:`ValueError` if the record is malformed.
def out_of_date(self):
    try:
        latest_remote_sha = self.pr_commits(self.pull_request.refresh(True))[-1].sha
        print("Latest remote sha: {}".format(latest_remote_sha))
        try:
            print("Ratelimit remaining: {}".format(self.github.ratelimit_remaining))
        except Exception:
            print("Failed to look up ratelimit remaining")
        return self.last_sha != latest_remote_sha
    except IndexError:
        return False
Check if our local latest sha matches the remote latest sha
def guess_python_env():
    version, major, minor = get_version_info()
    if 'PyPy' in version:
        return 'pypy3' if major == 3 else 'pypy'
    return 'py{major}{minor}'.format(major=major, minor=minor)
Guess the default python env to use.
def metapolicy(request, permitted, domains=None):
    if domains is None:
        domains = []
    policy = policies.Policy(*domains)
    policy.metapolicy(permitted)
    return serve(request, policy)
Serves a cross-domain policy which can allow other policies to exist on the same domain. Note that this view, if used, must be the master policy for the domain, and so must be served from the URL ``/crossdomain.xml`` on the domain: setting metapolicy information in other policy files is forbidden by the cross-domain policy specification. **Required arguments:** ``permitted`` A string indicating the extent to which other policies are permitted. A set of constants is available in ``flashpolicies.policies``, defining acceptable values for this argument. **Optional arguments:** ``domains`` A list of domains from which to allow access. Each value may be either a domain name (e.g., ``example.com``) or a wildcard (e.g., ``*.example.com``). Due to serious potential security issues, it is strongly recommended that you not use wildcard domain values.
def solve(expected: List[Tuple[float, float]],
          actual: List[Tuple[float, float]]) -> np.ndarray:
    ex = np.array([list(point) + [1] for point in expected]).transpose()
    ac = np.array([list(point) + [1] for point in actual]).transpose()
    transform = np.dot(ac, inv(ex))
    return transform

Takes two lists of 3 x-y points each, and calculates the matrix representing the
transformation from one space to the other.

The 3x3 matrix returned by this method represents the 2-D transformation matrix from
the actual point to the expected point.

Example: If the expected points are
    [ (1, 1), (2, 2), (1, 2) ]
and the actual measured points are
    [ (1.1, 1.1), (2.1, 2.1), (1.1, 2.1) ]
(in other words, a shift of exactly +0.1 in both x and y), then the resulting
transformation matrix T should be:

    [ 1 0 -0.1 ]
    [ 0 1 -0.1 ]
    [ 0 0  1   ]

Then, if we take a 3x3 matrix B representing one of the measured points on the deck:

    [ 1 0 1.1 ]
    [ 0 1 2.1 ]
    [ 0 0 1   ]

Then B*T will yield the "actual" point:

    [ 1 0 1 ]
    [ 0 1 2 ]
    [ 0 0 1 ]

The return value of this function is the transformation matrix T
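A quick numerical sketch of calling the function with the worked example from the docstring (assumes numpy is importable in scope; note that, as implemented, the returned matrix maps the homogeneous expected-point columns onto the actual-point columns):

import numpy as np

expected = [(1, 1), (2, 2), (1, 2)]
actual = [(1.1, 1.1), (2.1, 2.1), (1.1, 2.1)]
T = solve(expected, actual)
print(np.round(T, 3))  # a 3x3 affine transform relating the two point sets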
def scan(cls, formats=ALL_CODE_TYPES, camera=-1):
    app = AndroidApplication.instance()
    r = app.create_future()
    pkg = BarcodePackage.instance()
    pkg.setBarcodeResultListener(pkg.getId())
    pkg.onBarcodeResult.connect(r.set_result)
    intent = cls(app)
    if formats:
        intent.setDesiredBarcodeFormats(formats)
    if camera != -1:
        intent.setCameraId(camera)
    intent.initiateScan()
    return r

Shortcut for initiating a scan; only one scan at a time will work.
def list_voices(
        self,
        language_code=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
):
    if "list_voices" not in self._inner_api_calls:
        self._inner_api_calls[
            "list_voices"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.list_voices,
            default_retry=self._method_configs["ListVoices"].retry,
            default_timeout=self._method_configs["ListVoices"].timeout,
            client_info=self._client_info,
        )
    request = cloud_tts_pb2.ListVoicesRequest(language_code=language_code)
    return self._inner_api_calls["list_voices"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Returns a list of ``Voice`` supported for synthesis. Example: >>> from google.cloud import texttospeech_v1beta1 >>> >>> client = texttospeech_v1beta1.TextToSpeechClient() >>> >>> response = client.list_voices() Args: language_code (str): Optional (but recommended) `BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__ language tag. If specified, the ListVoices call will only return voices that can be used to synthesize this language\_code. E.g. when specifying "en-NZ", you will get supported "en-*" voices; when specifying "no", you will get supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh" will also get supported "cmn-*" voices; specifying "zh-hk" will also get supported "yue-\*" voices. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.texttospeech_v1beta1.types.ListVoicesResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def fill(text, width=70, **kwargs):
    w = ParagraphWrapper(width=width, **kwargs)
    return w.fill(text)
Fill multiple paragraphs of text, returning a new string. Reformat multiple paragraphs in 'text' to fit in lines of no more than 'width' columns, and return a new string containing the entire wrapped text. As with wrap(), tabs are expanded and other whitespace characters converted to space. See ParagraphWrapper class for available keyword args to customize wrapping behaviour.
def _ordered(generator, *args, **kwargs):
    unordered_dict = {k: v for k, v in generator(*args, **kwargs)}
    keys = sorted(list(dict(unordered_dict).keys()))
    result = OrderedDict()
    for key in keys:
        result[key] = unordered_dict[key]
    return result
Sort keys of unordered_dict and store in OrderedDict.
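The calling convention (a generator function plus its arguments, not an already-built dict) is worth a tiny sketch; the pair generator here is made up for illustration:

def pairs():
    yield 'b', 2
    yield 'a', 1

print(_ordered(pairs))  # OrderedDict([('a', 1), ('b', 2)])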
def list(self):
    mask = ...  # the object-mask string assigned here was elided in this extract
    results = self.client.call('Account', 'getReservedCapacityGroups', mask=mask)
    return results
List Reserved Capacities
def get_version(self):
    raw_version = run_cmd(["podman", "version"], return_output=True)
    regex = re.compile(r"Version:\s*(\d+)\.(\d+)\.(\d+)")
    match = regex.findall(raw_version)
    try:
        return match[0]
    except IndexError:
        logger.error("unable to parse version from `podman version`")
        return
return 3-tuple of version info or None :return: (str, str, str)
def run():
    print('Python ' + sys.version.replace('\n', ''))
    try:
        oscrypto_tests_module_info = imp.find_module('tests', [os.path.join(build_root, 'oscrypto')])
        oscrypto_tests = imp.load_module('oscrypto.tests', *oscrypto_tests_module_info)
        oscrypto = oscrypto_tests.local_oscrypto()
        print('\noscrypto backend: %s' % oscrypto.backend())
    except (ImportError):
        pass
    if run_lint:
        print('')
        lint_result = run_lint()
    else:
        lint_result = True
    if run_coverage:
        print('\nRunning tests (via coverage.py)')
        sys.stdout.flush()
        tests_result = run_coverage(ci=True)
    else:
        print('\nRunning tests')
        sys.stdout.flush()
        tests_result = run_tests()
    sys.stdout.flush()
    return lint_result and tests_result
Runs the linter and tests :return: A bool - if the linter and tests ran successfully
def action_ipset(reader, *args):
    ip_set = set()
    for record in reader:
        if record.log_status in (SKIPDATA, NODATA):
            continue
        ip_set.add(record.srcaddr)
        ip_set.add(record.dstaddr)
    for ip in ip_set:
        print(ip)
Show the set of IPs seen in Flow Log records.
def get_movielens(variant="20m"):
    filename = "movielens_%s.hdf5" % variant
    path = os.path.join(_download.LOCAL_CACHE_DIR, filename)
    if not os.path.isfile(path):
        log.info("Downloading dataset to '%s'", path)
        _download.download_file(URL_BASE + filename, path)
    else:
        log.info("Using cached dataset at '%s'", path)
    with h5py.File(path, 'r') as f:
        m = f.get('movie_user_ratings')
        plays = csr_matrix((m.get('data'), m.get('indices'), m.get('indptr')))
        return np.array(f['movie']), plays
Gets movielens datasets Parameters --------- variant : string Which version of the movielens dataset to download. Should be one of '20m', '10m', '1m' or '100k'. Returns ------- movies : ndarray An array of the movie titles. ratings : csr_matrix A sparse matrix where the row is the movieId, the column is the userId and the value is the rating.
def _ilshift(self, n):
    assert 0 < n <= self.len
    self._append(Bits(n))
    self._truncatestart(n)
    return self
Shift bits by n to the left in place. Return self.
def deliver(self, message):
    config = self.config
    success = config.success
    failure = config.failure
    exhaustion = config.exhaustion
    if getattr(message, 'die', False):
        1/0
    if failure:
        chance = random.randint(0, 100001) / 100000.0
        if chance < failure:
            raise TransportFailedException("Mock failure.")
    if exhaustion:
        chance = random.randint(0, 100001) / 100000.0
        if chance < exhaustion:
            raise TransportExhaustedException("Mock exhaustion.")
    if success == 1.0:
        return True
    chance = random.randint(0, 100001) / 100000.0
    if chance <= success:
        return True
    return False
Concrete message delivery.
def weld_vec_of_struct_to_struct_of_vec(vec_of_structs, weld_types):
    obj_id, weld_obj = create_weld_object(vec_of_structs)
    appenders = struct_of('appender[{e}]', weld_types)
    types = struct_of('{e}', weld_types)
    merges = struct_of('merge(b.${i}, e.${i})', weld_types)
    result = struct_of('result(vecs.${i})', weld_types)
    weld_template = ...  # the multi-line Weld code template string was elided in this extract
    weld_obj.weld_code = weld_template.format(vec_of_struct=obj_id,
                                              appenders=appenders,
                                              types=types,
                                              merges=merges,
                                              result=result)
    return weld_obj
Create a struct of vectors. Parameters ---------- vec_of_structs : WeldObject Encoding a vector of structs. weld_types : list of WeldType The Weld types of the arrays in the same order. Returns ------- WeldObject Representation of this computation.
def oq_server_context_processor(request):
    context = {}
    context['oq_engine_server_url'] = ('//' +
                                       request.META.get('HTTP_HOST', 'localhost:8800'))
    context['oq_engine_version'] = oqversion
    context['server_name'] = settings.SERVER_NAME
    return context
A custom context processor which allows injection of additional context variables.
def write_vaultlocker_conf(context, priority=100):
    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
        hookenv.service_name()
    )
    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
    templating.render(source='vaultlocker.conf.j2',
                      target=charm_vl_path,
                      context=context,
                      perms=0o600)
    alternatives.install_alternative('vaultlocker.conf',
                                     '/etc/vaultlocker/vaultlocker.conf',
                                     charm_vl_path,
                                     priority)
Write vaultlocker configuration to disk and install alternative :param context: Dict of data from vault-kv relation :ptype: context: dict :param priority: Priority of alternative configuration :ptype: priority: int
def _init_enrichment(self):
    if self.study_n:
        return 'e' if ((1.0 * self.study_count / self.study_n) >
                       (1.0 * self.pop_count / self.pop_n)) else 'p'
    return 'p'
Mark as 'enriched' or 'purified'.
def tolist(self) -> List[bool]:
    result = [False] * 64
    for square in self:
        result[square] = True
    return result
Convert the set to a list of 64 bools.
def get_action_arguments(self, service_name, action_name):
    return self.services[service_name].actions[action_name].info
Returns a list of tuples with all known arguments for the given service- and action-name combination. The tuples contain the argument-name, direction and data_type.
def changes(self):
    output = []
    if self.status() is self.UNMODIFIED:
        output = [self.formatter % (' ', self.key, self.old_value)]
    elif self.status() is self.ADDED:
        output.append(self.formatter % ('+', self.key, self.new_value))
    elif self.status() is self.REMOVED:
        output.append(self.formatter % ('-', self.key, self.old_value))
    elif self.status() is self.MODIFIED:
        output.append(self.formatter % ('-', self.key, self.old_value))
        output.append(self.formatter % ('+', self.key, self.new_value))
    return output
Returns a list of changes to represent the diff between old and new value. Returns: list: [string] representation of the change (if any) between old and new value
def start(self, interval_s):
    if self.running:
        return False
    self.stopped.clear()

    def _execute():
        if not self.method() and self.stop_if_false:
            return
        while not self.stopped.wait(interval_s):
            if not self.method() and self.stop_if_false:
                return

    self.thread = threading.Thread(target=_execute)
    self.thread.daemon = True
    self.thread.start()
    return True
Starts executing the method at the specified interval. Args: interval_s: The amount of time between executions of the method. Returns: False if the interval was already running.
def _onAs(self, name):
    " Memorizes an alias for an import or an imported item "
    if self.__lastImport.what:
        self.__lastImport.what[-1].alias = name
    else:
        self.__lastImport.alias = name
    return
Memorizes an alias for an import or an imported item
def extract_largest(self, inplace=False):
    mesh = self.connectivity(largest=True)
    if inplace:
        self.overwrite(mesh)
    else:
        return mesh
Extract largest connected set in mesh. Can be used to reduce residues obtained when generating an isosurface. Works only if residues are not connected (share at least one point with) the main component of the image. Parameters ---------- inplace : bool, optional Updates mesh in-place while returning nothing. Returns ------- mesh : vtki.PolyData Largest connected set in mesh
def dflt_sortby_ntgoea(ntgoea):
    return [ntgoea.enrichment, ntgoea.namespace, ntgoea.p_uncorrected,
            ntgoea.depth, ntgoea.GO]
Default sorting of GOEA results stored in namedtuples.
def close(self):
    self._closed = True
    if self.receive_task:
        self.receive_task.cancel()
    if self.connection:
        self.connection.close()
Close the underlying connection.
def fit_predict(self, data, labels, unkown=None):
    self.fit(data, labels)
    return self._predict_from_bmus(self._bmus, unkown)

Fit and classify data efficiently.

:param data: sparse input matrix (ideal dtype is `numpy.float32`)
:type data: :class:`scipy.sparse.csr_matrix`
:param labels: the labels associated with data
:type labels: iterable
:param unkown: the label to attribute if no label is known
:returns: the labels guessed for data
:rtype: `numpy.array`
def autoidlepc(self, compute_id, platform, image, ram):
    compute = self.get_compute(compute_id)
    for project in list(self._projects.values()):
        if project.name == "AUTOIDLEPC":
            yield from project.delete()
            self.remove_project(project)
    project = yield from self.add_project(name="AUTOIDLEPC")
    node = yield from project.add_node(compute, "AUTOIDLEPC", str(uuid.uuid4()),
                                       node_type="dynamips", platform=platform,
                                       image=image, ram=ram)
    res = yield from node.dynamips_auto_idlepc()
    yield from project.delete()
    self.remove_project(project)
    return res

Compute an IDLE PC value for an image

:param compute_id: ID of the compute where the idlepc operation needs to run
:param platform: Platform type
:param image: Image to use
:param ram: amount of RAM to use
def beholder_ng(func):
    @functools.wraps(func)
    def behold(file, length, *args, **kwargs):
        seek_cur = file.tell()
        try:
            return func(file, length, *args, **kwargs)
        except Exception:
            from pcapkit.protocols.raw import Raw
            error = traceback.format_exc(limit=1).strip().split(os.linesep)[-1]
            file.seek(seek_cur, os.SEEK_SET)
            next_ = Raw(file, length, error=error)
            return next_
    return behold
Behold analysis procedure.
def hungarian(A, B):
    distances = cdist(A, B, 'euclidean')
    indices_a, indices_b = linear_sum_assignment(distances)
    return indices_b
Hungarian reordering. Assume A and B are coordinates for atoms of SAME type only
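A minimal usage sketch (assuming numpy plus scipy's cdist and linear_sum_assignment, as used above):

import numpy as np

A = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
B = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
print(hungarian(A, B))  # [1 0] -- reordering B as B[[1, 0]] lines it up with A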
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day, environ=None):
    filename = get_benchmark_filename(symbol)
    data = _load_cached_data(filename, first_date, last_date, now, 'benchmark', environ)
    if data is not None:
        return data
    logger.info(
        ('Downloading benchmark data for {symbol!r} '
         'from {first_date} to {last_date}'),
        symbol=symbol,
        first_date=first_date - trading_day,
        last_date=last_date
    )
    try:
        data = get_benchmark_returns(symbol)
        data.to_csv(get_data_filepath(filename, environ))
    except (OSError, IOError, HTTPError):
        logger.exception('Failed to cache the new benchmark returns')
        raise
    if not has_data_for_dates(data, first_date, last_date):
        logger.warn(
            ("Still don't have expected benchmark data for {symbol!r} "
             "from {first_date} to {last_date} after redownload!"),
            symbol=symbol,
            first_date=first_date - trading_day,
            last_date=last_date
        )
    return data
Ensure we have benchmark data for `symbol` from `first_date` to `last_date` Parameters ---------- symbol : str The symbol for the benchmark to load. first_date : pd.Timestamp First required date for the cache. last_date : pd.Timestamp Last required date for the cache. now : pd.Timestamp The current time. This is used to prevent repeated attempts to re-download data that isn't available due to scheduling quirks or other failures. trading_day : pd.CustomBusinessDay A trading day delta. Used to find the day before first_date so we can get the close of the day prior to first_date. We attempt to download data unless we already have data stored at the data cache for `symbol` whose first entry is before or on `first_date` and whose last entry is on or after `last_date`. If we perform a download and the cache criteria are not satisfied, we wait at least one hour before attempting a redownload. This is determined by comparing the current time to the result of os.path.getmtime on the cache path.
def add_chars(self, chars):
    'Add given chars to char set'
    for c in chars:
        if self._ignorecase:
            self._whitelist_chars.add(c.lower())
            self._whitelist_chars.add(c.upper())
        else:
            self._whitelist_chars.add(c)
Add given chars to char set
def getidfkeyswithnodes():
    idf = IDF(StringIO(""))
    keys = idfobjectkeys(idf)
    keysfieldnames = ((key, idf.newidfobject(key.upper()).fieldnames)
                      for key in keys)
    keysnodefdnames = ((key, (name for name in fdnames
                              if (name.endswith('Node_Name'))))
                       for key, fdnames in keysfieldnames)
    nodekeys = [key for key, fdnames in keysnodefdnames if list(fdnames)]
    return nodekeys

return a list of keys of idfobjects that have 'Node Name' fields
def update_lincs_proteins():
    url = 'http://lincs.hms.harvard.edu/db/proteins/'
    prot_data = load_lincs_csv(url)
    prot_dict = {d['HMS LINCS ID']: d.copy() for d in prot_data}
    assert len(prot_dict) == len(prot_data), "We lost data."
    fname = os.path.join(path, 'lincs_proteins.json')
    with open(fname, 'w') as fh:
        json.dump(prot_dict, fh, indent=1)
Load the csv of LINCS protein metadata into a dict. Produces a dict keyed by HMS LINCS protein ids, with the metadata contained in a dict of row values keyed by the column headers extracted from the csv.
def _delly_exclude_file(items, base_file, chrom):
    base_exclude = sshared.prepare_exclude_file(items, base_file, chrom)
    out_file = "%s-delly%s" % utils.splitext_plus(base_exclude)
    with file_transaction(items[0], out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            with open(base_exclude) as in_handle:
                for line in in_handle:
                    parts = line.split("\t")
                    if parts[0] == chrom:
                        out_handle.write(line)
                    else:
                        out_handle.write("%s\n" % parts[0])
    return out_file
Prepare a delly-specific exclude file eliminating chromosomes. Delly wants excluded chromosomes listed as just the chromosome, with no coordinates.
def _get_regions(self):
    if self._specs_in[_REGIONS_STR] == 'all':
        return [_get_all_objs_of_type(
            Region, getattr(self._obj_lib, 'regions', self._obj_lib)
        )]
    else:
        return [set(self._specs_in[_REGIONS_STR])]
Get the requested regions.
def slanted_triangular(max_rate, num_steps, cut_frac=0.1, ratio=32, decay=1, t=0.0):
    cut = int(num_steps * cut_frac)
    while True:
        t += 1
        if t < cut:
            p = t / cut
        else:
            p = 1 - ((t - cut) / (cut * (1 / cut_frac - 1)))
        learn_rate = max_rate * (1 + p * (ratio - 1)) * (1 / ratio)
        yield learn_rate
Yield an infinite series of values according to Howard and Ruder's "slanted triangular learning rate" schedule.
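A small sketch of how the schedule behaves (the parameter values here are illustrative):

rates = slanted_triangular(0.01, num_steps=10, cut_frac=0.1, ratio=32)
print([round(next(rates), 5) for _ in range(3)])
# With cut == 1 the first value is already max_rate (0.01); subsequent values
# decay linearly toward max_rate / ratio over the remaining steps.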
def loadDict(filename):
    filename = os.path.expanduser(filename)
    if not splitext(filename)[1]:
        filename += ".bpickle"
    f = None
    try:
        f = open(filename, "rb")
        varH = cPickle.load(f)
    finally:
        if f:
            f.close()
    return varH

Return the variables pickled into `filename` with `saveVars`, as a dict.
def movies_box_office(self, **kwargs):
    path = self._get_path('movies_box_office')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response

Gets the top box office earning movies from the API. Sorted by most recent weekend
gross ticket sales.

Args:
    limit (optional): limits the number of movies returned, default=10
    country (optional): localized data for selected country, default="us"

Returns:
    A dict representation of the JSON returned from the API.
def eradicate_pgroup(self, pgroup, **kwargs):
    eradicate = {"eradicate": True}
    eradicate.update(kwargs)
    return self._request("DELETE", "pgroup/{0}".format(pgroup), eradicate)
Eradicate a destroyed pgroup. :param pgroup: Name of pgroup to be eradicated. :type pgroup: str :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **DELETE pgroup/:pgroup** :type \*\*kwargs: optional :returns: A dictionary mapping "name" to pgroup. :rtype: ResponseDict .. note:: Requires use of REST API 1.2 or later.
def run(self, instream=sys.stdin):
    sys.stdout.write(self.prompt)
    sys.stdout.flush()
    while True:
        line = instream.readline()
        try:
            self.exec_cmd(line)
        except Exception as e:
            self.errfun(e)
        sys.stdout.write(self.prompt)
        sys.stdout.flush()
Runs the CLI, reading from sys.stdin by default
def node_to_complex_fault_geometry(node):
    assert "complexFaultGeometry" in node.tag
    intermediate_edges = []
    for subnode in node.nodes:
        if "faultTopEdge" in subnode.tag:
            top_edge = linestring_node_to_line(subnode.nodes[0], with_depth=True)
        elif "intermediateEdge" in subnode.tag:
            int_edge = linestring_node_to_line(subnode.nodes[0], with_depth=True)
            intermediate_edges.append(int_edge)
        elif "faultBottomEdge" in subnode.tag:
            bottom_edge = linestring_node_to_line(subnode.nodes[0], with_depth=True)
        else:
            pass
    return [top_edge] + intermediate_edges + [bottom_edge]

Reads a complex fault geometry node and returns the fault edges as a list: the top edge, any intermediate edges, then the bottom edge.
def format(self, record: logging.LogRecord) -> str:
    if platform.system() != 'Linux':
        return super().format(record)
    record.msg = (
        self.STYLE[record.levelname] + record.msg + self.STYLE['END'])
    record.levelname = (
        self.STYLE['LEVEL'] + record.levelname + self.STYLE['END'])
    return super().format(record)
Format log records to produce colored messages. :param record: log record :return: log message