def number_of_states(dtrajs, only_used=False):
    dtrajs = _ensure_dtraj_list(dtrajs)
    if only_used:
        bc = count_states(dtrajs)
        return np.count_nonzero(bc)
    else:
        imax = 0
        for dtraj in dtrajs:
            imax = max(imax, np.max(dtraj))
        return imax + 1
r"""returns the number of states in the given trajectories. Parameters ---------- dtraj : array_like or list of array_like Discretized trajectory or list of discretized trajectories only_used = False : boolean If False, will return max+1, where max is the largest index used. If True, will return the number of states that occur at least once.
def add_exception(self, exception, stack, remote=False): self._check_ended() self.add_fault_flag() if hasattr(exception, '_recorded'): setattr(self, 'cause', getattr(exception, '_cause_id')) return exceptions = [] exceptions.append(Throwable(exception, stack, remote)) self.cause['exceptions'] = exceptions self.cause['working_directory'] = os.getcwd()
Add an exception to trace entities.

:param Exception exception: the caught exception.
:param list stack: the output from the python built-in `traceback.extract_stack()`.
:param bool remote: If False, the exception is a client error rather than one from a downstream service.
def num_unused_cpus(thresh=10):
    import psutil
    cpu_usage = psutil.cpu_percent(percpu=True)
    return sum([p < thresh for p in cpu_usage])
Returns the number of CPUs with utilization less than `thresh` percent.
def stft(func=None, **kwparams): from numpy.fft import fft, ifft return stft.base(transform=fft, inverse_transform=ifft)(func, **kwparams)
Short Time Fourier Transform for complex data.

Same as the default STFT strategy, but with new defaults. This is the same as:

.. code-block:: python

  stft.base(transform=numpy.fft.fft,
            inverse_transform=numpy.fft.ifft)

See ``stft.base`` docs for more.
def get_share_file (filename, devel_dir=None): paths = [get_share_dir()] if devel_dir is not None: paths.insert(0, devel_dir) for path in paths: fullpath = os.path.join(path, filename) if os.path.isfile(fullpath): return fullpath msg = "%s not found in %s; check your installation" % (filename, paths) raise ValueError(msg)
Return a filename in the share directory.

@param devel_dir: directory to search when developing
@ptype devel_dir: string
@param filename: filename to search for
@ptype filename: string
@return: the found filename
@rtype: string
@raises: ValueError if not found
def _raise_exception(self, eobj, edata=None): _, _, tbobj = sys.exc_info() if edata: emsg = self._format_msg(eobj["msg"], edata) _rwtb(eobj["type"], emsg, tbobj) else: _rwtb(eobj["type"], eobj["msg"], tbobj)
Raise exception by name.
def set_empty_text(self):
    self.buffer.insert_with_tags_by_name(
        self.buffer.get_start_iter(), self.empty_text, 'empty-text')
Display the empty text
def insert(self, state, token): if token == EndSymbol(): return self[state][EndSymbol()] from pydsl.check import check symbol_list = [x for x in self[state] if isinstance(x, TerminalSymbol) and check(x.gd, [token])] if not symbol_list: return {"action":"Fail"} if len(symbol_list) > 1: raise Exception("Multiple symbols matches input") symbol = symbol_list[0] return self[state][symbol]
change internal state, return action
def accumulate(self, axis: AxisIdentifier) -> HistogramBase: new_one = self.copy() axis_id = self._get_axis(axis) new_one._frequencies = np.cumsum(new_one.frequencies, axis_id[0]) return new_one
Calculate cumulative frequencies along a certain axis. Returns ------- new_hist: Histogram of the same type & size
def version(self, bundle: str, date: dt.datetime) -> models.Version: return (self.Version.query .join(models.Version.bundle) .filter(models.Bundle.name == bundle, models.Version.created_at == date) .first())
Fetch a version from the store.
def clear_all_flair(self):
    csv = [{'user': x['user']} for x in self.get_flair_list(limit=None)]
    if csv:
        return self.set_flair_csv(csv)
    else:
        return
Remove all user flair on this subreddit. :returns: The json response from the server when there is flair to clear, otherwise returns None.
def contains(self, value): str_value = StringConverter.to_nullable_string(value) for element in self: str_element = StringConverter.to_string(element) if str_value == None and str_element == None: return True if str_value == None or str_element == None: continue if str_value == str_element: return True return False
Checks if this array contains a value. The check uses direct comparison between elements and the specified value. :param value: a value to be checked :return: true if this array contains the value or false otherwise.
def unit_system_id(self): if self._unit_system_id is None: hash_data = bytearray() for k, v in sorted(self.lut.items()): hash_data.extend(k.encode("utf8")) hash_data.extend(repr(v).encode("utf8")) m = md5() m.update(hash_data) self._unit_system_id = str(m.hexdigest()) return self._unit_system_id
This is a unique identifier for the unit registry created from an MD5 hash of the unit lookup table. It is needed to register a dataset's code unit system in the unit system registry.
def prepare_gag_lsm(self, lsm_precip_data_var, lsm_precip_type, interpolation_type=None): if self.l2g is None: raise ValueError("LSM converter not loaded ...") for unif_precip_card in self.UNIFORM_PRECIP_CARDS: self.project_manager.deleteCard(unif_precip_card, self.db_session) with tmp_chdir(self.project_manager.project_directory): out_gage_file = '{0}.gag'.format(self.project_manager.name) self.l2g.lsm_precip_to_gssha_precip_gage(out_gage_file, lsm_data_var=lsm_precip_data_var, precip_type=lsm_precip_type) self._update_simulation_end_from_lsm() self.set_simulation_duration(self.simulation_end-self.simulation_start) self.add_precip_file(out_gage_file, interpolation_type) self.l2g.xd.close()
Prepares Gage output for GSSHA simulation Parameters: lsm_precip_data_var(list or str): String of name for precipitation variable name or list of precip variable names. See: :func:`~gsshapy.grid.GRIDtoGSSHA.lsm_precip_to_gssha_precip_gage`. lsm_precip_type(str): Type of precipitation. See: :func:`~gsshapy.grid.GRIDtoGSSHA.lsm_precip_to_gssha_precip_gage`. interpolation_type(str): Type of interpolation for LSM precipitation. Can be "INV_DISTANCE" or "THIESSEN". Default is "THIESSEN".
def __get_supported_file_types_string(self): languages = ["All Files (*)"] for language in self.__languages_model.languages: languages.append("{0} Files ({1})".format(language.name, " ".join(language.extensions.split("|")).replace("\\", "*"))) return ";;".join(languages)
Returns the supported file types dialog string.
def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    try:
        return max(best_list, key=lambda i: i[1])
    except ValueError:
        return None
Find the single best match above a score in a list of choices. This is a convenience method which returns the single best choice. See extract() for the full arguments list. Args: query: A string to match against choices: A list or dictionary of choices, suitable for use with extract(). processor: Optional function for transforming choices before matching. See extract(). scorer: Scoring function for extract(). score_cutoff: Optional argument for score threshold. If the best match is found, but it is not greater than this number, then return None anyway ("not a good enough match"). Defaults to 0. Returns: A tuple containing a single match and its score, if a match was found that was above score_cutoff. Otherwise, returns None.
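For context, a hedged usage sketch with made-up choices (the exact score depends on the configured scorer):

choices = ["New York Jets", "New York Giants", "Dallas Cowboys"]
extractOne("new york jets", choices)                     # -> ("New York Jets", <score>)
extractOne("atlanta falcons", choices, score_cutoff=90)  # -> None if nothing scores above 90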
def path_list(self, sep=os.pathsep):
    from pathlib import Path
    return [Path(pathstr) for pathstr in self.split(sep)]
Return list of Path objects.
async def zrange(self, name, start, end, desc=False, withscores=False, score_cast_func=float):
    if desc:
        return await self.zrevrange(name, start, end, withscores, score_cast_func)
    pieces = ['ZRANGE', name, start, end]
    if withscores:
        pieces.append(b('WITHSCORES'))
    options = {
        'withscores': withscores,
        'score_cast_func': score_cast_func
    }
    return await self.execute_command(*pieces, **options)
Return a range of values from sorted set ``name`` between ``start`` and ``end`` sorted in ascending order.

``start`` and ``end`` can be negative, indicating the end of the range.

``desc`` is a boolean indicating whether to sort the results in descending order.

``withscores`` indicates to return the scores along with the values. The return type is a list of (value, score) pairs.

``score_cast_func`` is a callable used to cast the score return value.
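A hypothetical call site, assuming `client` is an instance of the asyncio Redis client defining the coroutine above and that the key 'scores' already holds a sorted set:

best_three = await client.zrange('scores', 0, 2, desc=True, withscores=True)
# -> a list of (value, score) pairs, highest scores first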
def connect_edges(graph): paths = [] for start, end in graph.array(graph.kdims): start_ds = graph.nodes[:, :, start] end_ds = graph.nodes[:, :, end] if not len(start_ds) or not len(end_ds): raise ValueError('Could not find node positions for all edges') start = start_ds.array(start_ds.kdims[:2]) end = end_ds.array(end_ds.kdims[:2]) paths.append(np.array([start[0], end[0]])) return paths
Given a Graph element containing abstract edges compute edge segments directly connecting the source and target nodes. This operation just uses internal HoloViews operations and will be a lot slower than the pandas equivalent.
def delete_duplicates(seq):
    seen = set()
    seen_add = seen.add
    return [x for x in seq if not (x in seen or seen_add(x))]
Remove duplicates from an iterable, preserving the order. Args: seq: Iterable of various type. Returns: list: List of unique objects.
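Order-preserving deduplication in action:

delete_duplicates([3, 1, 3, 2, 1])  # -> [3, 1, 2]
delete_duplicates("abracadabra")    # -> ['a', 'b', 'r', 'c', 'd']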
def emit_after(self, event: str) -> Callable:
    def outer(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            returned = func(*args, **kwargs)
            self.emit(event)
            return returned
        return wrapper
    return outer
Decorator that emits events after the function is completed.

:param event: Name of the event.
:type event: str
:return: Callable

.. note::
    This plainly just calls functions without passing params into the subscribed callables. This is great if you want to do some kind of post processing without the callable requiring information before doing so.
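A minimal decorator sketch, assuming `bus` is an instance of the emitter class shown above:

@bus.emit_after('report-finished')
def build_report(data):
    ...            # do the actual work
    return data    # the wrapped return value is passed through unchanged

# calling build_report(...) now also fires the 'report-finished' event afterwards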
def _cldf2lexstat( dataset, segments='segments', transcription='value', row='parameter_id', col='language_id'): D = _cldf2wld(dataset) return lingpy.LexStat(D, segments=segments, transcription=transcription, row=row, col=col)
Read LexStat object from cldf dataset.
def auc(y_true, y_pred, round=True):
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    if round:
        y_true = y_true.round()
    if len(y_true) == 0 or len(np.unique(y_true)) < 2:
        return np.nan
    return skm.roc_auc_score(y_true, y_pred)
Area under the ROC curve
def notify_systemd(): try: import systemd.daemon except ImportError: if salt.utils.path.which('systemd-notify') \ and systemd_notify_call('--booted'): notify_socket = os.getenv('NOTIFY_SOCKET') if notify_socket: if notify_socket.startswith('@'): notify_socket = '\0{0}'.format(notify_socket[1:]) try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) sock.connect(notify_socket) sock.sendall('READY=1'.encode()) sock.close() except socket.error: return systemd_notify_call('--ready') return True return False if systemd.daemon.booted(): try: return systemd.daemon.notify('READY=1') except SystemError: pass
Notify systemd that this process has started
def unconsumed_ranges(self): res = IntervalTree() prev = None ranges = sorted([x for x in self.range_set], key=lambda x: x.begin) for rng in ranges: if prev is None: prev = rng continue res.add(Interval(prev.end, rng.begin)) prev = rng if len(self.range_set[self.tell()]) != 1: res.add(Interval(prev.end, self.tell())) return res
Return an IntervalTree of unconsumed ranges, in the format [start, end) with the end value not being included
def _compute_hparam_info_from_values(self, name, values): result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET) distinct_values = set( _protobuf_value_to_string(v) for v in values if _protobuf_value_type(v)) for v in values: v_type = _protobuf_value_type(v) if not v_type: continue if result.type == api_pb2.DATA_TYPE_UNSET: result.type = v_type elif result.type != v_type: result.type = api_pb2.DATA_TYPE_STRING if result.type == api_pb2.DATA_TYPE_STRING: break if result.type == api_pb2.DATA_TYPE_UNSET: return None if (result.type == api_pb2.DATA_TYPE_STRING and len(distinct_values) <= self._max_domain_discrete_len): result.domain_discrete.extend(distinct_values) return result
Builds an HParamInfo message from the hparam name and list of values. Args: name: string. The hparam name. values: list of google.protobuf.Value messages. The list of values for the hparam. Returns: An api_pb2.HParamInfo message.
def do_p(self, arg):
    try:
        self.message(bdb.safe_repr(self._getval(arg)))
    except Exception:
        pass
p expression
Print the value of the expression.
def get_default_ca_certs(): if not hasattr(get_default_ca_certs, '_path'): for path in get_default_ca_cert_paths(): if os.path.exists(path): get_default_ca_certs._path = path break else: get_default_ca_certs._path = None return get_default_ca_certs._path
Try to find the system path with CA certificates. This path is cached and returned. If no path is found, None is returned.
def standard_lstm_lm_200(dataset_name=None, vocab=None, pretrained=False, ctx=cpu(),
                         root=os.path.join(get_home_dir(), 'models'), **kwargs):
    predefined_args = {'embed_size': 200,
                       'hidden_size': 200,
                       'mode': 'lstm',
                       'num_layers': 2,
                       'tie_weights': True,
                       'dropout': 0.2}
    mutable_args = ['dropout']
    assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
        'Cannot override predefined model settings.'
    predefined_args.update(kwargs)
    return _get_rnn_model(StandardRNN, 'standard_lstm_lm_200', dataset_name, vocab,
                          pretrained, ctx, root, **predefined_args)
r"""Standard 2-layer LSTM language model with tied embedding and output weights. Both embedding and hidden dimensions are 200. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from the training set of the dataset. If None, then vocab is required, for specifying embedding weight size, and is directly returned. The pre-trained model achieves 108.25/102.26 ppl on Val and Test of wikitext-2 respectively. vocab : gluonnlp.Vocab or None, default None Vocabulary object to be used with the language model. Required when dataset_name is not specified. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block, gluonnlp.Vocab
def append(self, key, value=None, dir=False, ttl=None, timeout=None): return self.adapter.append(key, value, dir=dir, ttl=ttl, timeout=timeout)
Creates a new automatically increasing key in the given directory key.
def authnkey(self) -> dict: return {k: self._pubkey[k] for k in self._pubkey if self._pubkey[k].authn}
Accessor for public keys marked as authentication keys, by identifier.
def compute_eigenvalues(in_prefix, out_prefix): with open(out_prefix + ".parameters", "w") as o_file: print >>o_file, "genotypename: " + in_prefix + ".bed" print >>o_file, "snpname: " + in_prefix + ".bim" print >>o_file, "indivname: " + in_prefix + ".fam" print >>o_file, "evecoutname: " + out_prefix + ".evec.txt" print >>o_file, "evaloutname: " + out_prefix + ".eval.txt" print >>o_file, "numoutlieriter: 0" print >>o_file, "altnormstyle: NO" command = ["smartpca", "-p", out_prefix + ".parameters"] runCommand(command)
Computes the Eigenvalues using smartpca from Eigensoft. :param in_prefix: the prefix of the input files. :param out_prefix: the prefix of the output files. :type in_prefix: str :type out_prefix: str Creates a "parameter file" used by smartpca and runs it.
def get_hooks(self): if self.__hooks is None and self.hooks_class_name is not None: hooks_class = util.for_name(self.hooks_class_name) if not isinstance(hooks_class, type): raise ValueError("hooks_class_name must refer to a class, got %s" % type(hooks_class).__name__) if not issubclass(hooks_class, hooks.Hooks): raise ValueError( "hooks_class_name must refer to a hooks.Hooks subclass") self.__hooks = hooks_class(self) return self.__hooks
Returns a hooks.Hooks class or None if no hooks class has been set.
def install_module( self, target=None, package_manager=None, install_optional=False, production_only=False, force=False, node_paths=None, frozen_lockfile=None, workunit_name=None, workunit_labels=None): package_manager = package_manager or self.get_package_manager(target=target) command = package_manager.install_module( install_optional=install_optional, force=force, production_only=production_only, node_paths=node_paths, frozen_lockfile=frozen_lockfile ) return self._execute_command( command, workunit_name=workunit_name, workunit_labels=workunit_labels)
Installs node module using requested package_manager.
def create_table(self, table_name, obj=None, **kwargs): return self.client.create_table( table_name, obj=obj, database=self.name, **kwargs )
Dispatch to ImpalaClient.create_table. See that function's docstring for more
def _set_timeouts(self, timeouts): (send_timeout, recv_timeout) = (None, None) try: (send_timeout, recv_timeout) = timeouts except TypeError: raise EndpointError( '`timeouts` must be a pair of numbers (2, 3) which represent ' 'the timeout values for send and receive respectively') if send_timeout is not None: self.socket.set_int_option( nanomsg.SOL_SOCKET, nanomsg.SNDTIMEO, send_timeout) if recv_timeout is not None: self.socket.set_int_option( nanomsg.SOL_SOCKET, nanomsg.RCVTIMEO, recv_timeout)
Set socket timeouts for send and receive respectively
def create_roadmap_doc(dat, opFile): op = format_title('Roadmap for AIKIF') for h1 in dat['projects']: op += format_h1(h1) if dat[h1] is None: op += '(No details)\n' else: for h2 in dat[h1]: op += '\n' + format_h2(h2) if dat[h1][h2] is None: op += '(blank text)\n' else: for txt in dat[h1][h2]: op += ' - ' + txt + '\n' op += '\n' with open(opFile, 'w') as f: f.write(op)
takes a dictionary read from a yaml file and converts it to the roadmap documentation
def join_struct_arrays(arrays):
    sizes = np.array([a.itemsize for a in arrays])
    offsets = np.r_[0, sizes.cumsum()]
    shape = arrays[0].shape
    joint = np.empty(shape + (offsets[-1],), dtype=np.uint8)
    for a, size, offset in zip(arrays, sizes, offsets):
        joint[..., offset:offset + size] = \
            np.atleast_1d(a).view(np.uint8).reshape(shape + (size,))
    dtype = sum((a.dtype.descr for a in arrays), [])
    return joint.ravel().view(dtype)
Takes a list of possibly structured arrays, concatenates their dtypes, and returns one big array with that dtype. Does the inverse of ``separate_struct_array``. :param list arrays: List of ``np.ndarray``s
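A worked example with two single-field arrays (native byte order assumed):

import numpy as np

a = np.array([(1,), (2,)], dtype=[('x', '<i4')])
b = np.array([(1.5,), (2.5,)], dtype=[('y', '<f8')])
joined = join_struct_arrays([a, b])
joined.dtype.names   # -> ('x', 'y')
joined['y']          # -> array([1.5, 2.5])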
def estimate_band_connection(prev_eigvecs, eigvecs, prev_band_order): metric = np.abs(np.dot(prev_eigvecs.conjugate().T, eigvecs)) connection_order = [] for overlaps in metric: maxval = 0 for i in reversed(range(len(metric))): val = overlaps[i] if i in connection_order: continue if val > maxval: maxval = val maxindex = i connection_order.append(maxindex) band_order = [connection_order[x] for x in prev_band_order] return band_order
A function to order the phonon eigenvectors taken from phonopy
def write(self, datapoint): if not isinstance(datapoint, DataPoint): raise TypeError("First argument must be a DataPoint object") datapoint._stream_id = self.get_stream_id() if self._cached_data is not None and datapoint.get_data_type() is None: datapoint._data_type = self.get_data_type() self._conn.post("/ws/DataPoint/{}".format(self.get_stream_id()), datapoint.to_xml())
Write some raw data to a stream using the DataPoint API This method will mutate the datapoint provided to populate it with information available from the stream as it is available (but without making any new HTTP requests). For instance, we will add in information about the stream data type if it is available so that proper type conversion happens. Values already set on the datapoint will not be overridden (except for path) :param DataPoint datapoint: The :class:`.DataPoint` that should be written to Device Cloud
def _resource(resource, pretty: bool = None, **data): data = clean_data(data) ctx = click.get_current_context() if ctx.obj.get("env_prefix"): data["env_prefix"] = ctx.obj["env_prefix"] rsp = resource(**data) dump = partial(json.dumps, indent=4) if pretty else partial(json.dumps) click.echo(dump(rsp))
The callback func that will be hooked to the generic resource commands
def ascolumn(x, dtype=None):
    x = asarray(x, dtype)
    return x if len(x.shape) >= 2 else x.reshape(len(x), 1)
Convert ``x`` into a ``column``-type ``numpy.ndarray``.
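For example (assuming the module-level `asarray` helper behaves like ``numpy.asarray``):

ascolumn([1.0, 2.0, 3.0]).shape    # -> (3, 1)
ascolumn(np.ones((4, 2))).shape    # -> (4, 2), already 2-D so returned unchanged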
def get_next(self):
    self._counter_curr += 1
    suffix = self._separator + "%s" % str(self._counter_curr)
    return self._base_name + suffix
Return next name.
def insert(self, rectangle):
    rectangle = np.asanyarray(rectangle, dtype=np.float64)
    for child in self.child:
        if child is not None:
            attempt = child.insert(rectangle)
            if attempt is not None:
                return attempt
    if self.occupied:
        return None
    size_test = self.extents - rectangle
    if np.any(size_test < -tol.zero):
        return None
    self.occupied = True
    if np.all(size_test < tol.zero):
        return self.bounds[0:2]
    vertical = size_test[0] > size_test[1]
    length = rectangle[int(not vertical)]
    child_bounds = self.split(length, vertical)
    self.child[0] = RectangleBin(bounds=child_bounds[0])
    self.child[1] = RectangleBin(bounds=child_bounds[1])
    return self.child[0].insert(rectangle)
Insert a rectangle into the bin. Parameters ------------- rectangle: (2,) float, size of rectangle to insert
def make_int(value, missing=-1):
    if isinstance(value, six.string_types):
        if not value.strip():
            return missing
    elif value is None:
        return missing
    return int(value)
Convert a string value to int; blank strings and None map to ``missing``.
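Illustrative calls:

make_int('42')           # -> 42
make_int('   ')          # -> -1, blank strings map to the missing value
make_int(None)           # -> -1
make_int('', missing=0)  # -> 0
make_int(7.9)            # -> 7, anything else is handed to int()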
def separator(self, *args, **kwargs): levelOverride = kwargs.get('level') or self._lastlevel self._log(levelOverride, '', 'separator', args, kwargs)
Prints a separator to the log. This can be used to separate blocks of log messages. The separator will default its log level to the level of the last message printed unless specified with the level= kwarg. The length and type of the separator string is determined by the current style. See ``setStyle``
def create_paired_device(self, dev_id, agent_path, capability, cb_notify_device, cb_notify_error): return self._interface.CreatePairedDevice(dev_id, agent_path, capability, reply_handler=cb_notify_device, error_handler=cb_notify_error)
Creates a new object path for a remote device. This method will connect to the remote device and retrieve all SDP records and then initiate the pairing. If a previously :py:meth:`create_device` was used successfully, this method will only initiate the pairing. Compared to :py:meth:`create_device` this method will fail if the pairing already exists, but not if the object path already has been created. This allows applications to use :py:meth:`create_device` first and then, if needed, use :py:meth:`create_paired_device` to initiate pairing. The agent object path is assumed to reside within the process (D-Bus connection instance) that calls this method. No separate registration procedure is needed for it and it gets automatically released once the pairing operation is complete. :param str dev_id: New device MAC address create e.g., '11:22:33:44:55:66' :param str agent_path: Path used when creating the bluetooth agent e.g., '/test/agent' :param str capability: Pairing agent capability e.g., 'DisplayYesNo', etc :param func cb_notify_device: Callback on success. The callback is called with the new device's object path as an argument. :param func cb_notify_error: Callback on error. The callback is called with the error reason. :return: :raises dbus.Exception: org.bluez.Error.InvalidArguments :raises dbus.Exception: org.bluez.Error.Failed
def get_request_headers(self, *args, **kwds): if self.request_headers: return self._unpack_headers(self.request_headers)
A convenience method for obtaining the headers that were sent to the S3 server. The AWS S3 API depends upon setting headers. This method is provided as a convenience for debugging issues with the S3 communications.
def _pixel_to_tile(x: float, y: float) -> Tuple[float, float]: xy = tcod.ffi.new("double[2]", (x, y)) tcod.lib.TCOD_sys_pixel_to_tile(xy, xy + 1) return xy[0], xy[1]
Convert pixel coordinates to tile coordinates.
def post_slack_message(message=None, channel=None, username=None, icon_emoji=None): LOG.debug('Slack Channel: %s\nSlack Message: %s', channel, message) slack = slacker.Slacker(SLACK_TOKEN) try: slack.chat.post_message(channel=channel, text=message, username=username, icon_emoji=icon_emoji) LOG.info('Message posted to %s', channel) except slacker.Error: LOG.info("error posted message to %s", channel)
Format the message and post to the appropriate slack channel. Args: message (str): Message to post to slack channel (str): Desired channel. Must start with #
def getAllReadGroupSets(self): for dataset in self.getAllDatasets(): iterator = self._client.search_read_group_sets( dataset_id=dataset.id) for readGroupSet in iterator: yield readGroupSet
Returns all readgroup sets on the server.
def add_field_like(self, name, like_array): new_shape = list(like_array.shape) new_shape[0] = len(self) new_data = ma.empty(new_shape, like_array.dtype) new_data.mask = True self.add_field(name, new_data)
Add a new field to the Datamat with the dtype of the like_array and the shape of the like_array except for the first dimension which will be instead the field-length of this Datamat.
def _make_ssh_forward_server(self, remote_address, local_bind_address): _Handler = self._make_ssh_forward_handler_class(remote_address) try: if isinstance(local_bind_address, string_types): forward_maker_class = self._make_unix_ssh_forward_server_class else: forward_maker_class = self._make_ssh_forward_server_class _Server = forward_maker_class(remote_address) ssh_forward_server = _Server( local_bind_address, _Handler, logger=self.logger, ) if ssh_forward_server: ssh_forward_server.daemon_threads = self.daemon_forward_servers self._server_list.append(ssh_forward_server) self.tunnel_is_up[ssh_forward_server.server_address] = False else: self._raise( BaseSSHTunnelForwarderError, 'Problem setting up ssh {0} <> {1} forwarder. You can ' 'suppress this exception by using the `mute_exceptions`' 'argument'.format(address_to_str(local_bind_address), address_to_str(remote_address)) ) except IOError: self._raise( BaseSSHTunnelForwarderError, "Couldn't open tunnel {0} <> {1} might be in use or " "destination not reachable".format( address_to_str(local_bind_address), address_to_str(remote_address) ) )
Make SSH forward proxy Server class
def create(self, neighbors): data = {'neighbors': neighbors} return super(ApiV4Neighbor, self).post('api/v4/neighbor/', data)
Method to create neighbors

:param neighbors: List containing neighbors desired to be created on database
:return: The response from the POST request to 'api/v4/neighbor/'
def unescape(s, unicode_action="replace"): import HTMLParser hp = HTMLParser.HTMLParser() s = hp.unescape(s) s = s.encode('ascii', unicode_action) s = s.replace("\n", "").strip() return s
Unescape HTML strings, and convert &amp; etc.
def class_balancing_sample_weights(y):
    h = np.bincount(y)
    cls_weight = 1.0 / (h.astype(float) * len(np.nonzero(h)[0]))
    cls_weight[np.isnan(cls_weight)] = 0.0
    sample_weight = cls_weight[y]
    return sample_weight
Compute sample weight given an array of sample classes. The weights are assigned on a per-class basis and the per-class weights are inversely proportional to their frequency. Parameters ---------- y: NumPy array, 1D dtype=int sample classes, values must be 0 or positive Returns ------- NumPy array, 1D dtype=float per sample weight array
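A quick worked example:

import numpy as np

y = np.array([0, 0, 0, 1])
class_balancing_sample_weights(y)
# class counts are [3, 1] over two non-empty classes, so class weights are [1/6, 1/2]
# -> array([0.1667, 0.1667, 0.1667, 0.5])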
def ucnstring_to_unicode(ucn_string): ucn_string = ucnstring_to_python(ucn_string).decode('utf-8') assert isinstance(ucn_string, text_type) return ucn_string
Return ucnstring as Unicode.
def start(sync_event_source, loop=None): if not loop: loop = asyncio.get_event_loop() event_source = asyncio.Queue(loop=loop) bridge = threading.Thread(target=_multiprocessing_to_asyncio, args=(sync_event_source, event_source, loop), daemon=True) bridge.start() app = init_app(event_source, loop=loop) aiohttp.web.run_app(app, host=config['wsserver']['host'], port=config['wsserver']['port'])
Create and start the WebSocket server.
def list_event_sources(self): path = '/archive/{}/events/sources'.format(self._instance) response = self._client.get_proto(path=path) message = archive_pb2.EventSourceInfo() message.ParseFromString(response.content) sources = getattr(message, 'source') return iter(sources)
Returns the existing event sources. :rtype: ~collections.Iterable[str]
def _compute(self): src_path = self.ctx.src_path if not src_path.exists: return NONE if src_path.is_null: return None try: if self.parse: value = self.parse(src_path) else: value = self._parse(src_path) return value except (SourceError, ValueError), ex: self.ctx.errors.invalid(str(ex)) return ERROR
Processes this field's `src` from `ctx.src`.
async def Check(self, stream): request = await stream.recv_message() checks = self._checks.get(request.service) if checks is None: await stream.send_trailing_metadata(status=Status.NOT_FOUND) elif len(checks) == 0: await stream.send_message(HealthCheckResponse( status=HealthCheckResponse.SERVING, )) else: for check in checks: await check.__check__() await stream.send_message(HealthCheckResponse( status=_status(checks), ))
Implements synchronous periodic checks
def register(self, resource, event, trigger, **kwargs): super(AristaTrunkDriver, self).register(resource, event, trigger, kwargs) registry.subscribe(self.subport_create, resources.SUBPORTS, events.AFTER_CREATE) registry.subscribe(self.subport_delete, resources.SUBPORTS, events.AFTER_DELETE) registry.subscribe(self.trunk_create, resources.TRUNK, events.AFTER_CREATE) registry.subscribe(self.trunk_update, resources.TRUNK, events.AFTER_UPDATE) registry.subscribe(self.trunk_delete, resources.TRUNK, events.AFTER_DELETE) self.core_plugin = directory.get_plugin() LOG.debug("Arista trunk driver initialized.")
Called in trunk plugin's AFTER_INIT
def loaders(*specifiers): for specifier in specifiers: if isinstance(specifier, Locality): yield from _LOADERS[specifier] else: yield specifier
Generates loaders in the specified order. Arguments can be `.Locality` instances, producing the loader(s) available for that locality, `str` instances (used as file path templates) or `callable`s. These can be mixed: .. code-block:: python # define a load order using predefined user-local locations, # an explicit path, a template and a user-defined function load_order = loaders(Locality.user, '/etc/defaults/hard-coded.yaml', '/path/to/{name}.{extension}', my_loader) # load configuration for name 'my-application' using the load order # defined above config = load_name('my-application', load_order=load_order) :param specifiers: :return: a `generator` of configuration loaders in the specified order
def dft_task(cls, mol, xc="b3lyp", **kwargs): t = NwTask.from_molecule(mol, theory="dft", **kwargs) t.theory_directives.update({"xc": xc, "mult": t.spin_multiplicity}) return t
A class method for quickly creating DFT tasks with optional cosmo parameter . Args: mol: Input molecule xc: Exchange correlation to use. \\*\\*kwargs: Any of the other kwargs supported by NwTask. Note the theory is always "dft" for a dft task.
def events(self, argv): opts = cmdline(argv, FLAGS_EVENTS) self.foreach(opts.args, lambda job: output(job.events(**opts.kwargs)))
Retrieve events for the specified search jobs.
def _prepare_conn_args(self, kwargs): kwargs['connect_over_uds'] = True kwargs['timeout'] = kwargs.get('timeout', 60) kwargs['cookie'] = kwargs.get('cookie', 'admin') if self._use_remote_connection(kwargs): kwargs['transport'] = kwargs.get('transport', 'https') if kwargs['transport'] == 'https': kwargs['port'] = kwargs.get('port', 443) else: kwargs['port'] = kwargs.get('port', 80) kwargs['verify'] = kwargs.get('verify', True) if isinstance(kwargs['verify'], bool): kwargs['verify_ssl'] = kwargs['verify'] else: kwargs['ca_bundle'] = kwargs['verify'] kwargs['connect_over_uds'] = False return kwargs
Set connection arguments for remote or local connection.
def append(self, *nodes: Union[AbstractNode, str]) -> None: node = _to_node_list(nodes) self.appendChild(node)
Append new nodes after last child node.
def query(number, domains, resolver=None): if resolver is None: resolver = dns.resolver.get_default_resolver() for domain in domains: if isinstance(domain, (str, unicode)): domain = dns.name.from_text(domain) qname = dns.e164.from_e164(number, domain) try: return resolver.query(qname, 'NAPTR') except dns.resolver.NXDOMAIN: pass raise dns.resolver.NXDOMAIN
Look for NAPTR RRs for the specified number in the specified domains.

e.g. query('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
def match(self, package): if isinstance(package, basestring): from .packages import Package package = Package.parse(package) if self.name != package.name: return False if self.version_constraints and \ package.version not in self.version_constraints: return False if self.build_options: if package.build_options: if self.build_options - package.build_options: return False else: return True else: return False else: return True
Match ``package`` with the requirement. :param package: Package to test with the requirement. :type package: package expression string or :class:`Package` :returns: ``True`` if ``package`` satisfies the requirement. :rtype: bool
def lookup_object(model, object_id, slug, slug_field): lookup_kwargs = {} if object_id: lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id elif slug and slug_field: lookup_kwargs['%s__exact' % slug_field] = slug else: raise GenericViewError( "Generic view must be called with either an object_id or a" " slug/slug_field.") try: return model.objects.get(**lookup_kwargs) except ObjectDoesNotExist: raise Http404("No %s found for %s" % (model._meta.verbose_name, lookup_kwargs))
Return the ``model`` object with the passed ``object_id``. If ``object_id`` is None, then return the object whose ``slug_field`` equals the passed ``slug``. If ``slug`` and ``slug_field`` are not passed, then raise Http404 exception.
def get_peer_resources(self, peer_jid): try: d = dict(self._presences[peer_jid]) d.pop(None, None) return d except KeyError: return {}
Return a dict mapping resources of the given bare `peer_jid` to the presence state last received for that resource. Unavailable presence states are not included. If the bare JID is in a error state (i.e. an error presence stanza has been received), the returned mapping is empty.
def tryload_cache(dpath, fname, cfgstr, verbose=None):
    try:
        return load_cache(dpath, fname, cfgstr, verbose=verbose)
    except IOError:
        return None
returns None if cache cannot be loaded
def clean_community_indexes(communityID):
    communityID = np.array(communityID)
    cid_shape = communityID.shape
    if len(cid_shape) > 1:
        communityID = communityID.flatten()
    new_communityID = np.zeros(len(communityID))
    for i, n in enumerate(np.unique(communityID)):
        new_communityID[communityID == n] = i
    if len(cid_shape) > 1:
        new_communityID = new_communityID.reshape(cid_shape)
    return new_communityID
Takes input of community assignments. Returns reindexed community assignment by using the smallest numbers possible.

Parameters
----------
communityID : array-like
    list or array of integers. Output from community detection algorithms.

Returns
-------
new_communityID : array
    cleaned list going from 0 to len(np.unique(communityID))-1

Note
----
Behaviour of the function entails that the lowest community integer in communityID will receive the lowest integer in new_communityID.
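For instance:

clean_community_indexes([4, 4, 9, 2])
# unique labels [2, 4, 9] are remapped to [0, 1, 2]
# -> array([1., 1., 2., 0.])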
def clear_key_before(self, key, namespace=None, timestamp=None): block_size = self.config.block_size if namespace is None: namespace = self.config.namespace if timestamp is not None: offset, remainder = divmod(timestamp, block_size) if remainder: raise ValueError('timestamp must be on a block boundary') if offset == 0: raise ValueError('cannot delete before offset zero') offset -= 1 self.driver.clear_key_before(key, namespace, offset, timestamp) else: self.driver.clear_key_before(key, namespace)
Clear all data before `timestamp` for a given key. Note that the timestamp is rounded down to the nearest block boundary
def format_citations(zid, url='https://zenodo.org/', hits=10, tag_prefix='v'): url = ('{url}/api/records/?' 'page=1&' 'size={hits}&' 'q=conceptrecid:"{id}"&' 'sort=-version&' 'all_versions=True'.format(id=zid, url=url, hits=hits)) metadata = requests.get(url).json() lines = [] for i, hit in enumerate(metadata['hits']['hits']): version = hit['metadata']['version'][len(tag_prefix):] lines.append('-' * len(version)) lines.append(version) lines.append('-' * len(version)) lines.append('') lines.append('.. image:: {badge}\n' ' :target: {doi}'.format(**hit['links'])) if i < hits - 1: lines.append('') return '\n'.join(lines)
Query and format a citations page from Zenodo entries

Parameters
----------
zid : `int`, `str`
    the Zenodo ID of the target record
url : `str`, optional
    the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hits : `int`, optional
    the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
    the prefix for git tags. This is removed to generate the section headers in the output RST

Returns
-------
rst : `str`
    an RST-formatted string of DOI badges with URLs
def needsattached(func): @functools.wraps(func) def wrap(self, *args, **kwargs): if not self.attached: raise PositionError('Not attached to any process.') return func(self, *args, **kwargs) return wrap
Decorator to prevent commands from being used when not attached.
def print_sorted_counter(counter, tab=1): for key, count in sorted(counter.items(), key=itemgetter(1), reverse=True): print "{0}{1} - {2}".format('\t'*tab, key, count)
print all elements of a counter in descending order
def get_instance(self, payload): return CertificateInstance(self._version, payload, fleet_sid=self._solution['fleet_sid'], )
Build an instance of CertificateInstance :param dict payload: Payload response from the API :returns: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateInstance :rtype: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateInstance
def cmd_sync(self, low): kwargs = copy.deepcopy(low) for ignore in ['tgt', 'fun', 'arg', 'timeout', 'tgt_type', 'kwarg']: if ignore in kwargs: del kwargs[ignore] return self.cmd(low['tgt'], low['fun'], low.get('arg', []), low.get('timeout'), low.get('tgt_type'), low.get('kwarg'), **kwargs)
Execute a salt-ssh call synchronously. .. versionadded:: 2015.5.0 WARNING: Eauth is **NOT** respected .. code-block:: python client.cmd_sync({ 'tgt': 'silver', 'fun': 'test.ping', 'arg': (), 'tgt_type'='glob', 'kwarg'={} }) {'silver': {'fun_args': [], 'jid': '20141202152721523072', 'return': True, 'retcode': 0, 'success': True, 'fun': 'test.ping', 'id': 'silver'}}
def attach_image(field, nested_fields, page, record_keeper=None): if (field in nested_fields) and nested_fields[field]: foreign_image_id = nested_fields[field]["id"] if record_keeper: try: local_image_id = record_keeper.get_local_image( foreign_image_id) local_image = Image.objects.get(id=local_image_id) setattr(page, field, local_image) except ObjectDoesNotExist: raise ObjectDoesNotExist( ("executing attach_image: local image referenced" "in record_keeper does not actually exist."), None) except Exception: raise else: raise Exception( ("Attempted to attach image without record_keeper. " "This functionality is not yet implemented"))
Attaches an image to the page if it exists.

Currently assumes that images have already been imported and info has been stored in record_keeper.
def singleOrPair(obj): if len(list(obj.__class__.__mro__)) <= 2: return 'Neither' else: if ancestorJr(obj) is Pair: return 'Pair' elif ancestor(obj) is Single: return 'Single' else: return 'Neither'
Check whether an object is single or pair or neither. Of course, all pairs are single, so what the function is really detecting is whether an object is only single or at the same time a pair.

Args:
    obj (object): Literally anything.

Returns:
    str: 'Single', or 'Pair', or 'Neither'
def version(self, context=None): if self.replaces_scope and self.replaces_name: if context: old_opts = context.options.for_scope(self.replaces_scope) if old_opts.get(self.replaces_name) and not old_opts.is_default(self.replaces_name): return old_opts.get(self.replaces_name) else: logger.warn('Cannot resolve version of {} from deprecated option {} in scope {} without a ' 'context!'.format(self._get_name(), self.replaces_name, self.replaces_scope)) return self.get_options().version
Returns the version of the specified binary tool. If replaces_scope and replaces_name are defined, then the caller must pass in a context, otherwise no context should be passed. # TODO: Once we're migrated, get rid of the context arg. :API: public
def apply_single_tag_set(tag_set, selection): def tags_match(server_tags): for key, value in tag_set.items(): if key not in server_tags or server_tags[key] != value: return False return True return selection.with_server_descriptions( [s for s in selection.server_descriptions if tags_match(s.tags)])
All servers matching one tag set. A tag set is a dict. A server matches if its tags are a superset: A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}. The empty tag set {} matches any server.
def load_profiles(self, overwrite=False): for profile in self.minimum_needs.get_profiles(overwrite): self.profile_combo.addItem(profile) minimum_needs = self.minimum_needs.get_full_needs() self.profile_combo.setCurrentIndex( self.profile_combo.findText(minimum_needs['profile']))
Load the profiles into the dropdown list. :param overwrite: If we overwrite existing profiles from the plugin. :type overwrite: bool
def show_detailed_monitoring(name=None, instance_id=None, call=None, quiet=False): if call != 'action': raise SaltCloudSystemExit( 'The show_detailed_monitoring action must be called with -a or --action.' ) location = get_location() if six.text_type(name).startswith('i-') and (len(name) == 10 or len(name) == 19): instance_id = name if not name and not instance_id: raise SaltCloudSystemExit( 'The show_detailed_monitoring action must be provided with a name or instance\ ID' ) matched = _get_node(name=name, instance_id=instance_id, location=location) log.log( logging.DEBUG if quiet is True else logging.INFO, 'Detailed Monitoring is %s for %s', matched['monitoring'], name ) return matched['monitoring']
Show the details from EC2 regarding cloudwatch detailed monitoring.
def _wait_output(popen, is_slow): proc = Process(popen.pid) try: proc.wait(settings.wait_slow_command if is_slow else settings.wait_command) return True except TimeoutExpired: for child in proc.children(recursive=True): _kill_process(child) _kill_process(proc) return False
Returns `True` if we can get output of the command in the `settings.wait_command` time. Command will be killed if it wasn't finished in the time. :type popen: Popen :rtype: bool
def display(self): w, h = (0, 0) for line in self.shell('dumpsys', 'display').splitlines(): m = _DISPLAY_RE.search(line, 0) if not m: continue w = int(m.group('width')) h = int(m.group('height')) o = int(m.group('orientation')) w, h = min(w, h), max(w, h) return self.Display(w, h, o) output = self.shell('LD_LIBRARY_PATH=/data/local/tmp', self.__minicap, '-i') try: data = json.loads(output) (w, h, o) = (data['width'], data['height'], data['rotation']/90) return self.Display(w, h, o) except ValueError: pass
Return device width, height, rotation
def constructRows(self, items): rows = [] for item in items: row = dict((colname, col.extractValue(self, item)) for (colname, col) in self.columns.iteritems()) link = self.linkToItem(item) if link is not None: row[u'__id__'] = link rows.append(row) return rows
Build row objects that are serializable using Athena for sending to the client. @param items: an iterable of objects compatible with my columns' C{extractValue} methods. @return: a list of dictionaries, where each dictionary has a string key for each column name in my list of columns.
def get_command_templates(command_tokens, file_tokens=[], path_tokens=[], job_options=[]): files = get_files(file_tokens) paths = get_paths(path_tokens) job_options = get_options(job_options) templates = _get_command_templates(command_tokens, files, paths, job_options) for command_template in templates: command_template._dependencies = _get_prelim_dependencies( command_template, templates) return templates
Given a list of tokens from the grammar, return a list of command templates.
def _generate_default_grp_constraints(roles, network_constraints): default_delay = network_constraints.get('default_delay') default_rate = network_constraints.get('default_rate') default_loss = network_constraints.get('default_loss', 0) except_groups = network_constraints.get('except', []) grps = network_constraints.get('groups', roles.keys()) grps = [expand_groups(g) for g in grps] grps = [x for expanded_group in grps for x in expanded_group] return [{'src': grp1, 'dst': grp2, 'delay': default_delay, 'rate': default_rate, 'loss': default_loss} for grp1 in grps for grp2 in grps if ((grp1 != grp2 or _src_equals_dst_in_constraints(network_constraints, grp1)) and grp1 not in except_groups and grp2 not in except_groups)]
Generate default symmetric group constraints.
def run(host='0.0.0.0', port=5000, reload=True, debug=True): from werkzeug.serving import run_simple app = bootstrap.get_app() return run_simple( hostname=host, port=port, application=app, use_reloader=reload, use_debugger=debug, )
Run development server
def _highlight_lines(self, tokensource): hls = self.hl_lines for i, (t, value) in enumerate(tokensource): if t != 1: yield t, value if i + 1 in hls: if self.noclasses: style = '' if self.style.highlight_color is not None: style = (' style="background-color: %s"' % (self.style.highlight_color,)) yield 1, '<span%s>%s</span>' % (style, value) else: yield 1, '<span class="hll">%s</span>' % value else: yield 1, value
Highlight the lines specified in the `hl_lines` option by post-processing the token stream coming from `_format_lines`.
def smooth_angle_channels(self, channels): for vertex in self.vertices: for col in vertex.meta['rot_ind']: if col: for k in range(1, channels.shape[0]): diff=channels[k, col]-channels[k-1, col] if abs(diff+360.)<abs(diff): channels[k:, col]=channels[k:, col]+360. elif abs(diff-360.)<abs(diff): channels[k:, col]=channels[k:, col]-360.
Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions.
def finalize_sv(orig_vcf, data, items): paired = vcfutils.get_paired(items) if paired: sample_vcf = orig_vcf if paired.tumor_name == dd.get_sample_name(data) else None else: sample_vcf = "%s-%s.vcf.gz" % (utils.splitext_plus(orig_vcf)[0], dd.get_sample_name(data)) sample_vcf = vcfutils.select_sample(orig_vcf, dd.get_sample_name(data), sample_vcf, data["config"]) if sample_vcf: effects_vcf, _ = effects.add_to_vcf(sample_vcf, data, "snpeff") else: effects_vcf = None return effects_vcf or sample_vcf
Finalize structural variants, adding effects and splitting if needed.
def _container_blacklist(self): if self.__container_blacklist is None: self.__container_blacklist = \ set(self.CLOUD_BROWSER_CONTAINER_BLACKLIST or []) return self.__container_blacklist
Container blacklist.
def replace_uuid_w_names(self, resp): col_mapper = self.get_point_name(resp.context)["?point"].to_dict() resp.df.rename(columns=col_mapper, inplace=True) return resp
Replace the uuids with names.

Parameters
----------
resp : query result
    Response object whose ``df`` has columns keyed by point uuids and whose ``context`` resolves those uuids to point names.

Returns
-------
resp : query result
    The same response with its ``df`` columns renamed to point names.
def initialize_page(title, style, script, header=None): page = markup.page(mode="strict_html") page._escape = False page.init(title=title, css=style, script=script, header=header) return page
A function that returns a markup.py page object with the required html header.
def vertex_fingerprints(self): return self.get_vertex_fingerprints( [self.get_vertex_string(i) for i in range(self.num_vertices)], [self.get_edge_string(i) for i in range(self.num_edges)], )
A fingerprint for each vertex The result is invariant under permutation of the vertex indexes. Vertices that are symmetrically equivalent will get the same fingerprint, e.g. the hydrogens in methane would get the same fingerprint.
def first(sequence, message=None):
    try:
        return next(iter(sequence))
    except StopIteration:
        raise ValueError(message or ('Sequence is empty: %s' % sequence))
The first item in that sequence If there aren't any, raise a ValueError with that message
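Behaviour in the common cases:

first([7, 8, 9])                     # -> 7
first(x for x in range(5) if x > 2)  # -> 3
first([], 'no rows returned')        # raises ValueError('no rows returned')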
def list_suites(suitedir="./testcases/suites", cloud=False): suites = [] suites.extend(TestSuite.get_suite_files(suitedir)) if cloud: names = cloud.get_campaign_names() if names: suites.append("------------------------------------") suites.append("FROM CLOUD:") suites.extend(names) if not suites: return None from prettytable import PrettyTable table = PrettyTable(["Testcase suites"]) for suite in suites: table.add_row([suite]) return table
Static method for listing suites from both local source and cloud. Uses PrettyTable to generate the table. :param suitedir: Local directory for suites. :param cloud: cloud module :return: PrettyTable object or None if no test cases were found