def graph_lasso(X, num_folds):
    print("GraphLasso (sklearn)")
    model = GraphLassoCV(cv=num_folds)
    model.fit(X)
    print(" lam_: {}".format(model.alpha_))
    return model.covariance_, model.precision_, model.alpha_
Estimate inverse covariance via scikit-learn GraphLassoCV class.
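A minimal usage sketch for the function above, assuming scikit-learn is installed; note that recent scikit-learn releases renamed GraphLassoCV to GraphicalLassoCV, so the exact import is an assumption about the version in use.

import numpy as np
from sklearn.covariance import GraphLassoCV  # GraphicalLassoCV in newer releases

X = np.random.randn(200, 5)            # 200 samples, 5 features
cov, prec, lam = graph_lasso(X, num_folds=5)
print(cov.shape, prec.shape, lam)      # (5, 5) (5, 5) <selected regularization>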
def read_block(self, size, from_date=None):
    search_query = self._build_search_query(from_date)
    hits_block = []
    for hit in helpers.scan(self._es_conn, search_query, scroll='300m',
                            index=self._es_index, preserve_order=True):
        hits_block.append(hit)
        if len(hits_block) % size == 0:
            yield hits_block
            hits_block = []
    if len(hits_block) > 0:
        yield hits_block
Read items and return them in blocks.

:param from_date: start date for incremental reading.
:param size: block size.
:return: next block of items when any available.
:raises ValueError: `metadata__timestamp` field not found in index
:raises NotFoundError: index not found in ElasticSearch
def _set_country(self, c): self.location.countrycode = c.split()[0].split('=')[1].strip().upper()
callback if we used Tor's GETINFO ip-to-country
def discover_slaves(self, service_name):
    "Returns a list of alive slaves for service ``service_name``"
    for sentinel in self.sentinels:
        try:
            slaves = sentinel.sentinel_slaves(service_name)
        except (ConnectionError, ResponseError, TimeoutError):
            continue
        slaves = self.filter_slaves(slaves)
        if slaves:
            return slaves
    return []
Returns a list of alive slaves for service ``service_name``
def first_ipv6(self) -> Optional[AddressInfo]:
    for info in self._address_infos:
        if info.family == socket.AF_INET6:
            return info
The first IPV6 address.
def cache_model(key_params, timeout='default'):
    def decorator_fn(fn):
        return CacheModelDecorator().decorate(key_params, timeout, fn)
    return decorator_fn
Caching decorator for app models in task.perform
def get_lbry_api_function_docs(url=LBRY_API_RAW_JSON_URL):
    try:
        docs_page = urlopen(url)
        contents = docs_page.read().decode("utf-8")
        return loads(contents)
    except URLError as UE:
        print(UE)
    except Exception as E:
        print(E)
    return []
Scrapes the given URL to a page in JSON format to obtain the documentation for the LBRY API :param str url: URL to the documentation we need to obtain, pybry.constants.LBRY_API_RAW_JSON_URL by default :return: List of functions retrieved from the `url` given :rtype: list
def username_matches_request_user(view_fn):
    @wraps(view_fn)
    def wrapper(request, username, *args, **kwargs):
        User = get_user_model()
        user = get_object_or_404(User, username=username)
        if user != request.user:
            return HttpResponseForbidden()
        else:
            return view_fn(request, user, *args, **kwargs)
    return wrapper
Checks if the username matches the request user, and if so replaces username with the actual user object. Returns 404 if the username does not exist, and 403 if it doesn't match.
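A sketch of how the decorator above might be applied in a Django view; the view name, response text, and URL pattern it would be wired to are illustrative, not part of the original project.

from django.http import HttpResponse

@username_matches_request_user
def profile_view(request, user):
    # The decorator has already replaced the username string with a User object.
    return HttpResponse("Hello, {}".format(user.username))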
def _handle_units_placement(changeset, units, records): for service_name, service in sorted(changeset.bundle['services'].items()): num_units = service.get('num_units') if num_units is None: continue placement_directives = service.get('to', []) if not isinstance(placement_directives, (list, tuple)): placement_directives = [placement_directives] if placement_directives and not changeset.is_legacy_bundle(): placement_directives += ( placement_directives[-1:] * (num_units - len(placement_directives))) placed_in_services = {} for i in range(num_units): unit = units['{}/{}'.format(service_name, i)] record = records[unit['record']] if i < len(placement_directives): record = _handle_unit_placement( changeset, units, unit, record, placement_directives[i], placed_in_services) changeset.send(record)
Ensure that requires and placement directives are taken into account.
def join_path(self, *path):
    path = self.directory_sep().join(path)
    return self.normalize_path(path)
Unite entries to generate a single path :param path: path items to unite :return: str
def _current_web_port(self):
    info = inspect_container(self._get_container_name('web'))
    if info is None:
        return None
    try:
        if not info['State']['Running']:
            return None
        return info['NetworkSettings']['Ports']['5000/tcp'][0]['HostPort']
    except TypeError:
        return None
return just the port number for the web container, or None if not running
def _recv_sf(self, data):
    self.rx_timer.cancel()
    if self.rx_state != ISOTP_IDLE:
        warning("RX state was reset because single frame was received")
        self.rx_state = ISOTP_IDLE
    length = six.indexbytes(data, 0) & 0xf
    if len(data) - 1 < length:
        return 1
    msg = data[1:1 + length]
    self.rx_queue.put(msg)
    for cb in self.rx_callbacks:
        cb(msg)
    self.call_release()
    return 0
Process a received 'Single Frame' frame
def xyz_with_ports(self, arrnx3):
    if not self.children:
        if not arrnx3.shape[0] == 1:
            raise ValueError(
                'Trying to set position of {} with more than one '
                'coordinate: {}'.format(self, arrnx3))
        self.pos = np.squeeze(arrnx3)
    else:
        for atom, coords in zip(
                self._particles(include_ports=True), arrnx3):
            atom.pos = coords
Set the positions of the particles in the Compound, including the Ports. Parameters ---------- arrnx3 : np.ndarray, shape=(n,3), dtype=float The new particle positions
def get_namespace(taskfileinfo):
    element = taskfileinfo.task.element
    name = element.name
    return name + "_1"
Return a suitable name for a namespace for the taskfileinfo Returns the name of the shot/asset with a "_1" suffix. When you create the namespace the number will automatically be incremented by Maya. :param taskfileinfo: the taskfile info for the file that needs a namespace :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo` :returns: a namespace suggestion :rtype: str :raises: None
def install_hg(path):
    hook = op.join(path, 'hgrc')
    if not op.isfile(hook):
        open(hook, 'w+').close()
    c = ConfigParser()
    c.readfp(open(hook, 'r'))
    if not c.has_section('hooks'):
        c.add_section('hooks')
    if not c.has_option('hooks', 'commit'):
        c.set('hooks', 'commit', 'python:pylama.hooks.hg_hook')
    if not c.has_option('hooks', 'qrefresh'):
        c.set('hooks', 'qrefresh', 'python:pylama.hooks.hg_hook')
    c.write(open(hook, 'w+'))
Install hook in Mercurial repository.
def _build_option_description(k): o = _get_registered_option(k) d = _get_deprecated_option(k) s = '{k} '.format(k=k) if o.doc: s += '\n'.join(o.doc.strip().split('\n')) else: s += 'No description available.' if o: s += ('\n [default: {default}] [currently: {current}]' .format(default=o.defval, current=_get_option(k, True))) if d: s += '\n (Deprecated' s += (', use `{rkey}` instead.' .format(rkey=d.rkey if d.rkey else '')) s += ')' return s
Builds a formatted description of a registered option and prints it
def read_caffemodel(prototxt_fname, caffemodel_fname):
    if use_caffe:
        caffe.set_mode_cpu()
        net = caffe.Net(prototxt_fname, caffemodel_fname, caffe.TEST)
        layer_names = net._layer_names
        layers = net.layers
        return (layers, layer_names)
    else:
        proto = caffe_pb2.NetParameter()
        with open(caffemodel_fname, 'rb') as f:
            proto.ParseFromString(f.read())
        return (get_layers(proto), None)
Return a caffe_pb2.NetParameter object defined in a binary caffemodel file
def _adjust_returns(returns, adjustment_factor):
    if isinstance(adjustment_factor, (float, int)) and adjustment_factor == 0:
        return returns
    return returns - adjustment_factor
Returns the returns series adjusted by adjustment_factor. Optimizes for the case of adjustment_factor being 0 by returning returns itself, not a copy! Parameters ---------- returns : pd.Series or np.ndarray adjustment_factor : pd.Series or np.ndarray or float or int Returns ------- adjusted_returns : array-like
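A small illustration of the zero-optimization described above, assuming pandas is available; the return values are made up for the example.

import pandas as pd

returns = pd.Series([0.01, -0.02, 0.03])
assert _adjust_returns(returns, 0) is returns   # factor 0: the same object, no copy
adjusted = _adjust_returns(returns, 0.001)      # e.g. subtracting a risk-free rate
print(adjusted.tolist())                        # approximately [0.009, -0.021, 0.029]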
def process_all_json_files(build_dir):
    html_files = []
    for root, _, files in os.walk(build_dir):
        for filename in fnmatch.filter(files, '*.fjson'):
            if filename in ['search.fjson', 'genindex.fjson', 'py-modindex.fjson']:
                continue
            html_files.append(os.path.join(root, filename))
    page_list = []
    for filename in html_files:
        try:
            result = process_file(filename)
            if result:
                page_list.append(result)
        except:
            pass
    return page_list
Return a list of pages to index
def is_winding_consistent(self):
    if self.is_empty:
        return False
    # accessing is_watertight populates the cache entry read below as a side effect
    populate = self.is_watertight
    return self._cache['is_winding_consistent']
Does the mesh have consistent winding or not. A mesh with consistent winding has each shared edge going in an opposite direction from the other in the pair. Returns -------- consistent : bool Is winding is consistent or not
def metadata_matches(self, query={}):
    result = len(query.keys()) > 0
    for key in query.keys():
        result = result and query[key] == self.metadata.get(key)
    return result
Returns key matches to metadata This will check every key in query for a matching key in metadata returning true if every key is in metadata. query without keys return false. Args: query(object): metadata for matching Returns: bool: True: when key count in query is > 0 and all keys in query in self.metadata False: if key count in query is <= 0 or any key in query not found in self.metadata
def newfeed(ctx, symbol, price, market, cer, mssr, mcr, account):
    if cer:
        cer = Price(cer, quote=symbol, base="1.3.0",
                    bitshares_instance=ctx.bitshares)
    print_tx(
        ctx.bitshares.publish_price_feed(
            symbol, Price(price, market), cer=cer, mssr=mssr, mcr=mcr,
            account=account
        )
    )
Publish a price feed!

Examples:

\b
uptick newfeed USD 0.01 USD/BTS
uptick newfeed USD 100 BTS/USD

Core Exchange Rate (CER)

\b
If no CER is provided, the cer will be the same as the settlement price
with a 5% premium (Only if the 'market' is against the core asset (e.g. BTS)).
The CER is always defined against the core asset (BTS). This means that if
the backing asset is not the core asset (BTS), then you must specify your own
cer as a float. The float `x` will be interpreted as `x BTS/SYMBOL`.
def add_surface(self, name, surface):
    assert surface is not None
    if hasattr(self.module, name):
        raise ThriftCompilerError(
            'Cannot define "%s". The name has already been used.' % name
        )
    setattr(self.module, name, surface)
Adds a top-level attribute with the given name to the module.
def geoadd(self, name, *values):
    if len(values) % 3 != 0:
        raise DataError("GEOADD requires places with lon, lat and name"
                        " values")
    return self.execute_command('GEOADD', name, *values)
Add the specified geospatial items to the specified key identified by the ``name`` argument. The Geospatial items are given as ordered members of the ``values`` argument, each item or place is formed by the triad longitude, latitude and name.
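A usage sketch assuming a redis-py style client that exposes the method above; the host, key name, and coordinates are illustrative. Values are passed as flat longitude/latitude/name triads.

import redis

r = redis.StrictRedis(host='localhost', port=6379)
r.geoadd('Sicily', 13.361389, 38.115556, 'Palermo',
         15.087269, 37.502669, 'Catania')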
def remove(self, force=False): return self.client.api.remove_plugin(self.name, force=force)
Remove the plugin from the server. Args: force (bool): Remove even if the plugin is enabled. Default: False Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def read_ipx(self, length): if length is None: length = len(self) _csum = self._read_fileng(2) _tlen = self._read_unpack(2) _ctrl = self._read_unpack(1) _type = self._read_unpack(1) _dsta = self._read_ipx_address() _srca = self._read_ipx_address() ipx = dict( chksum=_csum, len=_tlen, count=_ctrl, type=TYPE.get(_type), dst=_dsta, src=_srca, ) proto = ipx['type'] length = ipx['len'] - 30 ipx['packet'] = self._read_packet(header=30, payload=length) return self._decode_next_layer(ipx, proto, length)
Read Internetwork Packet Exchange.

Structure of IPX header [RFC 1132]:

    Octets  Bits  Name        Description
    0       0     ipx.cksum   Checksum
    2       16    ipx.len     Packet Length (header includes)
    4       32    ipx.count   Transport Control (hop count)
    5       40    ipx.type    Packet Type
    6       48    ipx.dst     Destination Address
    18      144   ipx.src     Source Address
def capture(self, *args, **kwargs): import traceback try: from StringIO import StringIO except ImportError: from io import StringIO stdout, stderr = sys.stdout, sys.stderr sys.stdout = out = StringIO() sys.stderr = err = StringIO() result = { 'exception': None, 'stderr': None, 'stdout': None, 'return': None } try: result['return'] = self.__call__(*args, **kwargs) except Exception: result['exception'] = traceback.format_exc() sys.stdout, sys.stderr = stdout, stderr result['stderr'] = err.getvalue() result['stdout'] = out.getvalue() return result
Run a task and return a dictionary with stderr, stdout and the return value. Also, the traceback from the exception if there was one
def poll_event(self): for e in tdl.event.get(): self.e.type = e.type if e.type == 'KEYDOWN': self.e.key = e.key return self.e.gettuple()
Wait for an event and return it. Returns a tuple: (type, unicode character, key, mod, width, height, mousex, mousey).
def _is_excluded(self, path, dir_only): return self.npatterns and self._match_excluded(path, self.npatterns)
Check if file is excluded.
def create(self, **kwargs):
    response = self.ghost.execute_post('%s/' % self._type_name, json={
        self._type_name: [
            kwargs
        ]
    })
    return self._model_type(response.get(self._type_name)[0])
Creates a new resource. :param kwargs: The properties of the resource :return: The created item returned by the API wrapped as a `Model` object
def stop(self, timeout=5): for worker in self._threads: self._queue.put(_SHUTDOWNREQUEST) current = threading.currentThread() if timeout is not None and timeout >= 0: endtime = time.time() + timeout while self._threads: worker = self._threads.pop() if worker is not current and worker.isAlive(): try: if timeout is None or timeout < 0: worker.join() else: remaining_time = endtime - time.time() if remaining_time > 0: worker.join(remaining_time) if worker.isAlive(): c = worker.conn if c and not c.rfile.closed: try: c.socket.shutdown(socket.SHUT_RD) except TypeError: c.socket.shutdown() worker.join() except ( AssertionError, KeyboardInterrupt, ): pass
Terminate all worker threads. Args: timeout (int): time to wait for threads to stop gracefully
def type_and_times(type_: str, start: Timestamp, end: Timestamp,
                   probability: Number = None) -> str:
    if not type_:
        return ''
    if type_ == 'BECMG':
        return f"At {start.dt.hour or 'midnight'} zulu becoming"
    ret = f"From {start.dt.hour or 'midnight'} to {end.dt.hour or 'midnight'} zulu,"
    if probability and probability.value:
        ret += f" there's a {probability.value}% chance for"
    if type_ == 'INTER':
        ret += ' intermittent'
    elif type_ == 'TEMPO':
        ret += ' temporary'
    return ret
Format line type and times into the beginning of a spoken line string
def pad_to_multiple(obj, factor):
    _check_supported(obj)
    copied = deepcopy(obj)
    copied.pad_to_multiple(factor)
    return copied
Return a copy of the object with its piano-roll padded with zeros at the end along the time axis with the minimal length that make the length of the resulting piano-roll a multiple of `factor`. Parameters ---------- factor : int The value which the length of the resulting piano-roll will be a multiple of.
def list_all_requests_view(request, requestType):
    request_type = get_object_or_404(RequestType, url_name=requestType)
    requests = Request.objects.filter(request_type=request_type)
    if not request_type.managers.filter(incumbent__user=request.user):
        requests = requests.exclude(
            ~Q(owner__user=request.user), private=True,
        )
    page_name = "Archives - All {0} Requests".format(request_type.name.title())
    return render_to_response('list_requests.html', {
        'page_name': page_name,
        'requests': requests,
        'request_type': request_type,
    }, context_instance=RequestContext(request))
Show all the requests for a given type in list form.
def umask(self, new_mask):
    if not is_int_type(new_mask):
        raise TypeError('an integer is required')
    old_umask = self.filesystem.umask
    self.filesystem.umask = new_mask
    return old_umask
Change the current umask. Args: new_mask: (int) The new umask value. Returns: The old umask. Raises: TypeError: if new_mask is of an invalid type.
def linkify_with_timeperiods(self, timeperiods, prop):
    for i in self:
        if not hasattr(i, prop):
            continue
        tpname = getattr(i, prop).strip()
        if not tpname:
            setattr(i, prop, '')
            continue
        timeperiod = timeperiods.find_by_name(tpname)
        if timeperiod is None:
            i.add_error("The %s of the %s '%s' named '%s' is unknown!"
                        % (prop, i.__class__.my_type, i.get_name(), tpname))
            continue
        setattr(i, prop, timeperiod.uuid)
Link items with timeperiods items :param timeperiods: all timeperiods object :type timeperiods: alignak.objects.timeperiod.Timeperiods :param prop: property name :type prop: str :return: None
def total_reads_from_grabix(in_file):
    gbi_file = _get_grabix_index(in_file)
    if gbi_file:
        with open(gbi_file) as in_handle:
            next(in_handle)
            num_lines = int(next(in_handle).strip())
        assert num_lines % 4 == 0, "Expected lines to be multiple of 4"
        return num_lines // 4
    else:
        return 0
Retrieve total reads in a fastq file from grabix index.
def merge_layouts(layouts):
    layout = layouts[0].clone()
    for l in layouts[1:]:
        layout.files.update(l.files)
        layout.domains.update(l.domains)
        for k, v in l.entities.items():
            if k not in layout.entities:
                layout.entities[k] = v
            else:
                layout.entities[k].files.update(v.files)
    return layout
Utility function for merging multiple layouts. Args: layouts (list): A list of BIDSLayout instances to merge. Returns: A BIDSLayout containing merged files and entities. Notes: Layouts will be merged in the order of the elements in the list. I.e., the first Layout will be updated with all values in the 2nd Layout, then the result will be updated with values from the 3rd Layout, etc. This means that order matters: in the event of entity or filename conflicts, later layouts will take precedence.
def returner(ret): serv = _get_serv(ret) json_return = salt.utils.json.dumps(ret['return']) del ret['return'] json_full_ret = salt.utils.json.dumps(ret) if "influxdb08" in serv.__module__: req = [ { 'name': 'returns', 'columns': ['fun', 'id', 'jid', 'return', 'full_ret'], 'points': [ [ret['fun'], ret['id'], ret['jid'], json_return, json_full_ret] ], } ] else: req = [ { 'measurement': 'returns', 'tags': { 'fun': ret['fun'], 'id': ret['id'], 'jid': ret['jid'] }, 'fields': { 'return': json_return, 'full_ret': json_full_ret } } ] try: serv.write_points(req) except Exception as ex: log.critical('Failed to store return with InfluxDB returner: %s', ex)
Return data to a influxdb data store
def freeze_tag(name):
    def decorator(func):
        setattr(func, FREEZING_TAG_ATTRIBUTE, name)
        return func
    return decorator
This is not using decorator.py because we need to access the original function, not the wrapper.
def results(self):
    results = self.recommendations()
    transformed = []
    for t in results['results']:
        if len(t) == 2:
            cid, fc = t
            info = {}
        elif len(t) == 3:
            cid, fc, info = t
        else:
            bottle.abort(500, 'Invalid search result: "%r"' % t)
        result = info
        result['content_id'] = cid
        if not self.params['omit_fc']:
            result['fc'] = util.fc_to_json(fc)
        transformed.append(result)
    results['results'] = transformed
    return results
Returns results as a JSON encodable Python value. This calls :meth:`SearchEngine.recommendations` and converts the results returned into JSON encodable values. Namely, feature collections are slimmed down to only features that are useful to an end-user.
def get_json(jsonpath, default):
    from os import path
    import json
    result = default
    if path.isfile(jsonpath):
        try:
            with open(jsonpath) as f:
                result = json.load(f, object_pairs_hook=load_with_datetime)
        except IOError:
            err("Unable to deserialize JSON at {}".format(jsonpath))
    return result
Returns the JSON serialized object at the specified path, or the default if it doesn't exist or can't be deserialized.
def convert(self, targetunits):
    nunits = units.Units(targetunits)
    self.waveunits = nunits
Set new user unit, for wavelength only. This effectively converts the spectrum wavelength to given unit. Note that actual data are always kept in internal unit (Angstrom), and only converted to user unit by :meth:`GetWaveSet` during actual computation. User unit is stored in ``self.waveunits``. Throughput is unitless and cannot be converted. Parameters ---------- targetunits : str New unit name, as accepted by `~pysynphot.units.Units`.
def list_quota_volume(name):
    cmd = 'volume quota {0}'.format(name)
    cmd += ' list'
    root = _gluster_xml(cmd)
    if not _gluster_ok(root):
        return None
    ret = {}
    for limit in _iter(root, 'limit'):
        path = limit.find('path').text
        ret[path] = _etree_to_dict(limit)
    return ret
List quotas of glusterfs volume

name
    Name of the gluster volume

CLI Example:

.. code-block:: bash

    salt '*' glusterfs.list_quota_volume <volume>
def chunks_str(str, n, separator="\n", fill_blanks_last=True): return separator.join(chunks(str, n))
returns lines with max n characters

:Example:

>>> print(chunks_str('123456X', 3))
123
456
X
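chunks_str above delegates to a chunks() helper that is not shown; given the docstring example, a plausible implementation is sketched below. This is an assumption about the helper, not the library's actual code.

def chunks(s, n):
    # Split a string into consecutive pieces of at most n characters.
    return [s[i:i + n] for i in range(0, len(s), n)]

print(chunks_str('123456X', 3))
# 123
# 456
# X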
def owner(self):
    obj = javabridge.call(self.jobject, "getOwner", "()Lweka/core/CapabilitiesHandler;")
    if obj is None:
        return None
    else:
        return JavaObject(jobject=obj)
Returns the owner of these capabilities, if any. :return: the owner, can be None :rtype: JavaObject
def make_userdir(child):
    userdir = os.path.dirname(child)
    if not os.path.isdir(userdir):
        if os.name == 'nt':
            userdir += "."
        os.mkdir(userdir, 0o700)
Create a child directory.
def set_chat_title( self, chat_id: Union[int, str], title: str ) -> bool: peer = self.resolve_peer(chat_id) if isinstance(peer, types.InputPeerChat): self.send( functions.messages.EditChatTitle( chat_id=peer.chat_id, title=title ) ) elif isinstance(peer, types.InputPeerChannel): self.send( functions.channels.EditTitle( channel=peer, title=title ) ) else: raise ValueError("The chat_id \"{}\" belongs to a user".format(chat_id)) return True
Use this method to change the title of a chat. Titles can't be changed for private chats. You must be an administrator in the chat for this to work and must have the appropriate admin rights. Note: In regular groups (non-supergroups), this method will only work if the "All Members Are Admins" setting is off. Args: chat_id (``int`` | ``str``): Unique identifier (int) or username (str) of the target chat. title (``str``): New chat title, 1-255 characters. Returns: True on success. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. ``ValueError`` if a chat_id belongs to user.
def _weld_unary(array, weld_type, operation):
    if weld_type not in {WeldFloat(), WeldDouble()}:
        raise TypeError('Unary operation supported only on scalar f32 or f64')
    obj_id, weld_obj = create_weld_object(array)
    weld_template = 'map({array}, |e: {type}| {op}(e))'
    weld_obj.weld_code = weld_template.format(array=obj_id, type=weld_type, op=operation)
    return weld_obj
Apply operation on each element in the array. As mentioned by Weld, the operations follow the behavior of the equivalent C functions from math.h Parameters ---------- array : numpy.ndarray or WeldObject Data weld_type : WeldType Of the data operation : {'exp', 'log', 'sqrt', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'sinh', 'cosh', 'tanh', 'erf'} Which unary operation to apply. Returns ------- WeldObject Representation of this computation.
def report_change(self, name, value, maxdiff=1, deltat=10):
    r = self.reports[name]
    if time.time() < r.last_report + deltat:
        return
    r.last_report = time.time()
    if math.fabs(r.value - value) < maxdiff:
        return
    r.value = value
    self.say("%s %u" % (name, value))
report a sensor change
def _verify_type(self, spec, path):
    field_type = spec['type']
    if isinstance(field_type, Schema):
        if not set(spec.keys()).issubset(set(['type', 'required', 'nullable', 'default'])):
            raise SchemaFormatException(
                "Unsupported field spec item at {}. Items: " + repr(spec.keys()), path)
        return
    elif isinstance(field_type, Array):
        if not isinstance(field_type.contained_type, (type, Schema, Array, types.FunctionType)):
            raise SchemaFormatException(
                "Unsupported field type contained by Array at {}.", path)
    elif not isinstance(field_type, type) and not isinstance(field_type, types.FunctionType):
        raise SchemaFormatException(
            "Unsupported field type at {}. Type must be a type, a function, an Array or another Schema", path)
Verify that the 'type' in the spec is valid
def merge(self, other):
    assert self.refnames == other.refnames
    assert self.dirs == other.dirs
    assert self.lengths == other.lengths
    for i in range(2):
        if self.pos[i] is None:
            if other.pos[i] is None:
                raise Error('Error merging these two links:\n' + str(self) + '\n' + str(other))
            self.pos[i] = other.pos[i]
        else:
            if other.pos[i] is not None:
                raise Error('Error merging these two links:\n' + str(self) + '\n' + str(other))
Merge another link into this one. Expected that each link was created from each mate from a pair. We only know both distances to contig ends when we have read info from both mappings in a BAM file. All other info should be the same.
def dump_np_vars(self, store_format='csv', delimiter=','):
    ret = False
    if self.system.files.no_output is True:
        logger.debug('no_output is True, thus no TDS dump saved ')
        return True
    if self.write_lst() and self.write_np_dat(store_format=store_format,
                                              delimiter=delimiter):
        ret = True
    return ret
Dump the TDS simulation data to files by calling subroutines `write_lst` and `write_np_dat`. Parameters ----------- store_format : str dump format in `('csv', 'txt', 'hdf5')` delimiter : str delimiter for the `csv` and `txt` format Returns ------- bool: success flag
def _decorate_routes(self):
    self.logger.debug("Decorating routes")
    self.app.add_url_rule('/<path:path>', 'catch', self.catch,
                          methods=['GET', 'POST'], defaults={'path': ''})
    self.app.add_url_rule('/', 'index', self.index,
                          methods=['POST', 'GET'])
    self.app.add_url_rule('/feed', 'feed', self.feed,
                          methods=['POST'])
    self.app.add_url_rule('/poll', 'poll', self.poll,
                          methods=['POST'])
Decorates the routes to use within the flask app
def add_edge(self, u, v, **kwargs):
    if u != v:
        super(FactorGraph, self).add_edge(u, v, **kwargs)
    else:
        raise ValueError('Self loops are not allowed')
Add an edge between variable_node and factor_node. Parameters ---------- u, v: nodes Nodes can be any hashable Python object. Examples -------- >>> from pgmpy.models import FactorGraph >>> G = FactorGraph() >>> G.add_nodes_from(['a', 'b', 'c']) >>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4)) >>> G.add_nodes_from([phi1, phi2]) >>> G.add_edge('a', phi1)
def field_subset(f, inds, rank=0):
    f_dim_space = f.ndim - rank
    if inds.ndim > 2:
        raise Exception('Too many dimensions in indices array')
    if inds.ndim == 1:
        if f_dim_space == 1:
            return f[inds]
        else:
            raise Exception('Indices array is 1d but field is not')
    if inds.shape[1] != f_dim_space:
        raise Exception('Indices and field dimensions do not match')
    return f[tuple([inds[:, i] for i in range(inds.shape[1])])]
Return the value of a field at a subset of points. Parameters ---------- f: array, shape (a1, a2, ..., ad, r1, r2, ..., rrank) Rank-r field in d dimensions inds: integer array, shape (n, d) Index vectors rank: integer The rank of the field (0: scalar field, 1: vector field and so on). Returns ------- f_sub: array, shape (n, rank) The subset of field values.
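A quick numpy illustration of the indexing above, using a scalar field (rank 0) on a 2-D grid; the data are made up for the example.

import numpy as np

f = np.arange(12.0).reshape(3, 4)      # scalar field sampled on a 3x4 grid
inds = np.array([[0, 1], [2, 3]])      # two (i, j) index vectors
print(field_subset(f, inds))           # [ 1. 11.]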
def require_login(self, view_func):
    @wraps(view_func)
    def decorated(*args, **kwargs):
        if g.oidc_id_token is None:
            return self.redirect_to_auth_server(request.url)
        return view_func(*args, **kwargs)
    return decorated
Use this to decorate view functions that require a user to be logged in. If the user is not already logged in, they will be sent to the Provider to log in, after which they will be returned. .. versionadded:: 1.0 This was :func:`check` before.
def left(self, speed=1):
    self.right_motor.forward(speed)
    self.left_motor.backward(speed)
Make the robot turn left by running the right motor forward and left motor backward. :param float speed: Speed at which to drive the motors, as a value between 0 (stopped) and 1 (full speed). The default is 1.
def move_red_right(self):
    self = self.flip()
    if self.left is not NULL and self.left.left.red:
        self = self.rotate_right().flip()
    return self
Shuffle red to the right of a tree.
def read_config(*args):
    ret = {}
    if _TRAFFICCTL:
        cmd = _traffic_ctl('config', 'get')
    else:
        cmd = _traffic_line('-r')
    try:
        for arg in args:
            log.debug('Querying: %s', arg)
            ret[arg] = _subprocess(cmd + [arg])
    except KeyError:
        pass
    return ret
Read Traffic Server configuration variable definitions. .. versionadded:: 2016.11.0 .. code-block:: bash salt '*' trafficserver.read_config proxy.config.http.keep_alive_post_out
def to_dict(self):
    data = self.extract_fields()
    for key, attr in self.attributes.iteritems():
        if key in self.ignore:
            continue
        value = getattr(self.context, attr, None)
        if value is None:
            value = getattr(self, attr, None)
        if callable(value):
            value = value()
        data[key] = api.to_json_value(self.context, key, value)
    return data
extract the data of the content and return it as a dictionary
def calculate_job_input_hash(job_spec, workflow_json):
    if 'workflow_workspace' in job_spec:
        del job_spec['workflow_workspace']
    job_md5_buffer = md5()
    job_md5_buffer.update(json.dumps(job_spec).encode('utf-8'))
    job_md5_buffer.update(json.dumps(workflow_json).encode('utf-8'))
    return job_md5_buffer.hexdigest()
Calculate md5 hash of job specification and workflow json.
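A small demonstration of the hashing above; the job spec and workflow dictionaries are invented for the example, and it assumes the function's module imports md5 from hashlib and the standard json module. Note that any workflow_workspace key is removed from the spec in place before hashing.

job_spec = {'command': 'run.sh', 'workflow_workspace': '/tmp/ws'}
workflow_json = {'steps': ['fit', 'plot']}

digest = calculate_job_input_hash(job_spec, workflow_json)
print(len(digest))                         # 32 hex characters
print('workflow_workspace' in job_spec)    # False - dropped before hashing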
def _define_helper(flag_name, default_value, docstring, flagtype, required):
    option_name = flag_name if required else "--%s" % flag_name
    get_context_parser().add_argument(
        option_name, default=default_value, help=docstring, type=flagtype)
Registers 'flag_name' with 'default_value' and 'docstring'.
def enable(step: 'projects.ProjectStep'):
    restore_default_configuration()

    stdout_interceptor = RedirectBuffer(sys.stdout)
    sys.stdout = stdout_interceptor
    step.report.stdout_interceptor = stdout_interceptor

    stderr_interceptor = RedirectBuffer(sys.stderr)
    sys.stderr = stderr_interceptor
    step.report.stderr_interceptor = stderr_interceptor

    stdout_interceptor.active = True
    stderr_interceptor.active = True
Create a print equivalent function that also writes the output to the project page. The write_through is enabled so that the TextIOWrapper immediately writes all of its input data directly to the underlying BytesIO buffer. This is needed so that we can safely access the buffer data in a multi-threaded environment to display updates while the buffer is being written to. :param step:
def format_value(self, value, padding):
    if padding:
        return "{:0{pad}d}".format(value, pad=padding)
    else:
        return str(value)
Get padding adjusting for negative values.
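The method above is a thin wrapper around Python's nested format spec; a few direct format() calls show the effect, including how a sign counts toward the field width.

print("{:0{pad}d}".format(7, pad=4))    # 0007
print("{:0{pad}d}".format(-7, pad=4))   # -007  (the sign uses one column of the width)
print(str(7))                           # 7     (no padding requested)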
def patch(self, url, body="", headers={}, retry=True): return self.request(url=url, method="PATCH", body=body, headers=headers, retry=retry)
Execute an HTTP PATCH request and return a dict containing the response and the response status code. Keyword arguments: url -- The path to execute the result against, not including the API version or project ID, with no leading /. Required. body -- A string or file object to send as the body of the request. Defaults to an empty string. headers -- HTTP Headers to send with the request. Can overwrite the defaults. Defaults to {}. retry -- Whether exponential backoff should be employed. Defaults to True.
def create_key(self, title, key):
    created = None
    if title and key:
        url = self._build_url('user', 'keys')
        req = self._post(url, data={'title': title, 'key': key})
        json = self._json(req, 201)
        if json:
            created = Key(json, self)
    return created
Create a new key for the authenticated user. :param str title: (required), key title :param key: (required), actual key contents, accepts path as a string or file-like object :returns: :class:`Key <github3.users.Key>`
def _process_cidr_file(self, file): data = {'cidr': list(), 'countries': set(), 'city_country_mapping': dict()} allowed_countries = settings.IPGEOBASE_ALLOWED_COUNTRIES for cidr_info in self._line_to_dict(file, field_names=settings.IPGEOBASE_CIDR_FIELDS): city_id = cidr_info['city_id'] if cidr_info['city_id'] != '-' else None if city_id is not None: data['city_country_mapping'].update({cidr_info['city_id']: cidr_info['country_code']}) if allowed_countries and cidr_info['country_code'] not in allowed_countries: continue data['cidr'].append({'start_ip': cidr_info['start_ip'], 'end_ip': cidr_info['end_ip'], 'country_id': cidr_info['country_code'], 'city_id': city_id}) data['countries'].add(cidr_info['country_code']) return data
Iterate over ip info and extract useful data
def increase_indent(func):
    def wrapper(*args, **kwargs):
        global _debug_indent
        _debug_indent += 1
        result = func(*args, **kwargs)
        _debug_indent -= 1
        return result
    return wrapper
Decorator for making the debug output of the wrapped function indented one level deeper.
def listMetaContentTypes(self):
    all_md_content_types = (CT_CORE_PROPS, CT_EXT_PROPS, CT_CUSTOM_PROPS)
    return [k for k in self.overrides.keys() if k in all_md_content_types]
The content types with metadata @return: ['application/xxx', ...]
def _get_settings(self):
    url = self._global_settings_url
    payload = {}
    res = self._send_request('GET', url, payload, 'settings')
    if res and res.status_code in self._resp_ok:
        return res.json()
Get global mobility domain from DCNM.
def templates_match(self, path): template_path = get_template_path(self.template_dir, path) key = 'hardening:template:%s' % template_path template_checksum = file_hash(template_path) kv = unitdata.kv() stored_tmplt_checksum = kv.get(key) if not stored_tmplt_checksum: kv.set(key, template_checksum) kv.flush() log('Saved template checksum for %s.' % template_path, level=DEBUG) return False elif stored_tmplt_checksum != template_checksum: kv.set(key, template_checksum) kv.flush() log('Updated template checksum for %s.' % template_path, level=DEBUG) return False return True
Determines if the template files are the same. The template file equality is determined by the hashsum of the template files themselves. If there is no hashsum, then the content cannot be sure to be the same so treat it as if they changed. Otherwise, return whether or not the hashsums are the same. :param path: the path to check :returns: boolean
def readline(self, size=None):
    if self._pos >= self.length:
        return ''
    if size:
        amount = min(size, (self.length - self._pos))
    else:
        amount = self.length - self._pos
    out = self.stream.readline(amount)
    self._pos += len(out)
    return out
Read a line from the stream, including the trailing new line character. If `size` is set, don't read more than `size` bytes, even if the result does not represent a complete line. The last line read may not include a trailing new line character if one was not present in the underlying stream.
def path_wo_ns(obj):
    if isinstance(obj, pywbem.CIMInstance):
        path = obj.path.copy()
    elif isinstance(obj, pywbem.CIMInstanceName):
        path = obj.copy()
    else:
        assert False
    path.host = None
    path.namespace = None
    return path
Return path of an instance or instance path without host or namespace. Creates copy of the object so the original is not changed.
def field_to_markdown(field):
    if "title" in field:
        field_title = "**{}**".format(field["title"])
    else:
        raise Exception("Es necesario un `title` para describir un campo.")
    field_type = " ({})".format(field["type"]) if "type" in field else ""
    field_desc = ": {}".format(field["description"]) if "description" in field else ""
    text_template = "{title}{type}{description}"
    text = text_template.format(title=field_title, type=field_type,
                                description=field_desc)
    return text
Generates markdown text from the metadata of a `field`.

Args:
    field (dict): Dictionary with the metadata of a `field`.

Returns:
    str: Text describing a `field`.
def catch_gzip_errors(f):
    def new_f(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except requests.exceptions.ContentDecodingError as e:
            log.warning("caught gzip error: %s", e)
            self.connect()
            return f(self, *args, **kwargs)
    return new_f
A decorator to handle gzip encoding errors which have been known to happen during hydration.
def mapillary_tag_exists(self):
    description_tag = "Image ImageDescription"
    if description_tag not in self.tags:
        return False
    for requirement in ["MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime", "MAPLongitude", "MAPLatitude"]:
        if requirement not in self.tags[description_tag].values or json.loads(self.tags[description_tag].values)[requirement] in ["", None, " "]:
            return False
    return True
Check existence of required Mapillary tags
async def update_pin(**payload): data = payload["data"] web_client = payload["web_client"] channel_id = data["channel_id"] user_id = data["user"] onboarding_tutorial = onboarding_tutorials_sent[channel_id][user_id] onboarding_tutorial.pin_task_completed = True message = onboarding_tutorial.get_message_payload() updated_message = await web_client.chat_update(**message) onboarding_tutorial.timestamp = updated_message["ts"]
Update the onboarding welcome message after recieving a "pin_added" event from Slack. Update timestamp for welcome message as well.
def _get_update_fields(model, uniques, to_update):
    fields = {
        field.attname: field
        for field in model._meta.fields
    }
    if to_update is None:
        to_update = [
            field.attname for field in model._meta.fields
        ]
    to_update = [
        attname for attname in to_update
        if (attname not in uniques
            and not getattr(fields[attname], 'auto_now_add', False)
            and not fields[attname].auto_created)
    ]
    return to_update
Get the fields to be updated in an upsert. Always exclude auto_now_add, auto_created fields, and unique fields in an update
def _get_arg_spec(func):
    args, varargs, keywords, defaults = inspect.getargspec(func)
    if defaults is None:
        defaults = {}
    else:
        defaulted_args = args[-len(defaults):]
        defaults = {name: val for name, val in zip(defaulted_args, defaults)}
    return args, varargs, defaults
Gets the argument spec of the given function, returning defaults as a dict of param names to values
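A quick check of the helper above; it relies on the legacy inspect.getargspec, which was removed in Python 3.11, so this sketch assumes an older interpreter. The sample function is invented for illustration.

def sample(a, b, c=3, *rest):
    pass

args, varargs, defaults = _get_arg_spec(sample)
print(args)       # ['a', 'b', 'c']
print(varargs)    # 'rest'
print(defaults)   # {'c': 3}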
def login(username=None, password=None, token=None, url=None,
          two_factor_callback=None):
    g = None
    if (username and password) or token:
        g = GitHubEnterprise(url) if url is not None else GitHub()
        g.login(username, password, token, two_factor_callback)
    return g
Construct and return an authenticated GitHub session. This will return a GitHubEnterprise session if a url is provided. :param str username: login name :param str password: password for the login :param str token: OAuth token :param str url: (optional), URL of a GitHub Enterprise instance :param func two_factor_callback: (optional), function you implement to provide the Two Factor Authentication code to GitHub when necessary :returns: :class:`GitHub <github3.github.GitHub>`
def generate_namelist_file(self, rapid_namelist_file): log("Generating RAPID namelist file ...", "INFO") try: os.remove(rapid_namelist_file) except OSError: pass with open(rapid_namelist_file, 'w') as new_file: new_file.write('&NL_namelist\n') for attr, value in sorted(list(self.__dict__.items())): if not attr.startswith('_'): if attr.startswith('BS'): new_file.write("{0} = .{1}.\n" .format(attr, str(value).lower())) elif isinstance(value, int): new_file.write("%s = %s\n" % (attr, value)) else: if value: if os.name == "nt": value = self._get_cygwin_path(value) new_file.write("%s = \'%s\'\n" % (attr, value)) new_file.write("/\n")
Generate rapid_namelist file. Parameters ---------- rapid_namelist_file: str Path of namelist file to generate from parameters added to the RAPID manager.
def sort(self):
    users = []
    for _, group in itertools.groupby(sorted(self.commits),
                                      operator.attrgetter('author_mail')):
        if group:
            users.append(self.merge_user_commits(group))
    self.sorted_commits = sorted(users,
                                 key=operator.attrgetter('line_count'),
                                 reverse=True)
    return self.sorted_commits
Sort by commit size, per author.
def ncVarUnit(ncVar):
    attributes = ncVarAttributes(ncVar)
    if not attributes:
        return ''
    for key in ('unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'):
        if key in attributes:
            return attributes[key]
    else:
        return ''
Returns the unit of the ncVar by looking in the attributes. It searches in the attributes for one of the following keys: 'unit', 'units', 'Unit', 'Units', 'UNIT', 'UNITS'. If these are not found, the empty string is returned.
def prepare_installed_requirement(self, req, require_hashes, skip_reason): assert req.satisfied_by, "req should have been satisfied but isn't" assert skip_reason is not None, ( "did not get skip reason skipped but req.satisfied_by " "is set to %r" % (req.satisfied_by,) ) logger.info( 'Requirement %s: %s (%s)', skip_reason, req, req.satisfied_by.version ) with indent_log(): if require_hashes: logger.debug( 'Since it is already installed, we are trusting this ' 'package without checking its hash. To ensure a ' 'completely repeatable environment, install into an ' 'empty virtualenv.' ) abstract_dist = Installed(req) return abstract_dist
Prepare an already-installed requirement
def run_tsne(self, X=None, metric='correlation', **kwargs):
    if X is not None:
        dt = man.TSNE(metric=metric, **kwargs).fit_transform(X)
        return dt
    else:
        dt = man.TSNE(metric=self.distance, **kwargs).fit_transform(self.adata.obsm['X_pca'])
        tsne2d = dt
        self.adata.obsm['X_tsne'] = tsne2d
Wrapper for sklearn's t-SNE implementation. See sklearn for the t-SNE documentation. All arguments are the same with the exception that 'metric' is set to 'precomputed' by default, implying that this function expects a distance matrix by default.
def mix(self, ca, cb, xb):
    r = (1 - xb) * ca.red + xb * cb.red
    g = (1 - xb) * ca.green + xb * cb.green
    b = (1 - xb) * ca.blue + xb * cb.blue
    a = (1 - xb) * ca.alpha + xb * cb.alpha
    return gdk.RGBA(red=r, green=g, blue=b, alpha=a)
Mix colors. Args: ca (gdk.RGBA): first color cb (gdk.RGBA): second color xb (float): between 0.0 and 1.0 Return: gdk.RGBA: linear interpolation between ca and cb, 0 or 1 return the unaltered 1st or 2nd color respectively, as in CSS.
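A sketch of calling the mixer above; it assumes a PyGObject/GDK environment where gdk.RGBA is available, and `palette` stands in for whatever object exposes mix() (a hypothetical name for illustration).

white = gdk.RGBA(red=1.0, green=1.0, blue=1.0, alpha=1.0)
black = gdk.RGBA(red=0.0, green=0.0, blue=0.0, alpha=1.0)

grey = palette.mix(white, black, 0.5)    # halfway between the two colors
print(grey.red, grey.green, grey.blue)   # 0.5 0.5 0.5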
def _create_buffers(self):
    self.buffers = {}
    for step in self.graph.nodes():
        num_buffers = 1
        if isinstance(step, Reduction):
            num_buffers = len(step.parents)
        self.buffers[step] = Buffer(step.min_frames, step.left_context,
                                    step.right_context, num_buffers)
    return self.buffers
Create a buffer for every step in the pipeline.
def _insert_eups_version(c): eups_tag = os.getenv('EUPS_TAG') if eups_tag is None: eups_tag = 'd_latest' if eups_tag in ('d_latest', 'w_latest', 'current'): git_ref = 'master' elif eups_tag.startswith('d_'): git_ref = 'master' elif eups_tag.startswith('v'): git_ref = eups_tag.lstrip('v').replace('_', '.') elif eups_tag.startswith('w_'): git_ref = eups_tag.replace('_', '.') else: git_ref = 'master' c['release_eups_tag'] = eups_tag c['release_git_ref'] = git_ref c['version'] = eups_tag c['release'] = eups_tag c['scipipe_conda_ref'] = git_ref c['pipelines_demo_ref'] = git_ref c['newinstall_ref'] = git_ref return c
Insert information about the current EUPS tag into the configuration namespace. The variables are: ``release_eups_tag`` The EUPS tag (obtained from the ``EUPS_TAG`` environment variable, falling back to ``d_latest`` if not available). ``version``, ``release`` Same as ``release_eups_tag``. ``release_git_ref`` The git ref (branch or tag) corresponding ot the EUPS tag. ``scipipe_conda_ref`` Git ref for the https://github.com/lsst/scipipe_conda_env repo. ``newinstall_ref`` Git ref for the https://github.com/lsst/lsst repo. ``pipelines_demo_ref`` Git ref for the https://github.com/lsst/lsst_dm_stack_demo repo.
def extend(dset, array, **attrs):
    length = len(dset)
    if len(array) == 0:
        return length
    newlength = length + len(array)
    if array.dtype.name == 'object':
        shape = (newlength,) + preshape(array[0])
    else:
        shape = (newlength,) + array.shape[1:]
    dset.resize(shape)
    dset[length:newlength] = array
    for key, val in attrs.items():
        dset.attrs[key] = val
    return newlength
Extend an extensible dataset with an array of a compatible dtype. :param dset: an h5py dataset :param array: an array of length L :returns: the total length of the dataset (i.e. initial length + L)
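An h5py sketch of growing a resizable dataset with the helper above; the file name, attribute, and data are invented, and the dataset must have been created with maxshape=(None,) for resize() to work.

import h5py
import numpy as np

with h5py.File('example.hdf5', 'w') as f:
    dset = f.create_dataset('values', shape=(0,), maxshape=(None,), dtype='f8')
    total = extend(dset, np.array([1.0, 2.0, 3.0]), units='m')
    print(total)                # 3, the new total length
    print(dset.attrs['units'])  # m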
def body(self):
    view = ffi.buffer(self.packet.m_body, self.packet.m_nBodySize)
    return view[:]
The body of the packet.
def get_shear_vel(self, saturated):
    try:
        if saturated:
            return np.sqrt(self.g_mod / self.unit_sat_mass)
        else:
            return np.sqrt(self.g_mod / self.unit_dry_mass)
    except TypeError:
        return None
Calculate the shear wave velocity :param saturated: bool, if true then use saturated mass :return:
def ijk_to_xyz(dset, ijk):
    i = nl.dset_info(dset)
    orient_codes = [int(x) for x in nl.run(['@AfniOrient2RAImap', i.orient]).output.split()]
    orient_is = [abs(x) - 1 for x in orient_codes]
    rai = []
    for rai_i in xrange(3):
        ijk_i = orient_is[rai_i]
        if orient_codes[rai_i] > 0:
            rai.append(ijk[ijk_i] * i.voxel_size[rai_i] + i.spatial_from[rai_i])
        else:
            rai.append(i.spatial_to[rai_i] - ijk[ijk_i] * i.voxel_size[rai_i])
    return rai
convert the dset indices ``ijk`` to RAI coordinates ``xyz``
def getValue(self, prop, default=None):
    f = self.props.get(prop, None)
    if not f:
        return default
    if isinstance(f, Feature):
        return f.getValue()
    if isinstance(f, tuple):
        if f[0]:
            return f[0].getValue()
        elif f[1]:
            return f[1].getValue()
        raise Exception("Getting value from a property with a constrain")
    return f
Return the value of feature with that name or ``default``.
def supported_device(self, index=0):
    if not util.is_natural(index) or index >= self.num_supported_devices():
        raise ValueError('Invalid index.')
    info = structs.JLinkDeviceInfo()
    result = self._dll.JLINKARM_DEVICE_GetInfo(index, ctypes.byref(info))
    return info
Gets the device at the given ``index``. Args: self (JLink): the ``JLink`` instance index (int): the index of the device whose information to get Returns: A ``JLinkDeviceInfo`` describing the requested device. Raises: ValueError: if index is less than 0 or >= supported device count.
def get_font_face(self): return FontFace._from_pointer( cairo.cairo_get_font_face(self._pointer), incref=True)
Return the current font face. :param font_face: A new :class:`FontFace` object wrapping an existing cairo object.
def search_continuous_sets(self, dataset_id):
    request = protocol.SearchContinuousSetsRequest()
    request.dataset_id = dataset_id
    request.page_size = pb.int(self._page_size)
    return self._run_search_request(
        request, "continuoussets", protocol.SearchContinuousSetsResponse)
Returns an iterator over the ContinuousSets fulfilling the specified conditions from the specified Dataset. :param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset` of interest. :return: An iterator over the :class:`ga4gh.protocol.ContinuousSet` objects defined by the query parameters.
def to_array(self):
    array = super(PassportElementErrorFiles, self).to_array()
    array['source'] = u(self.source)
    array['type'] = u(self.type)
    array['file_hashes'] = self._as_array(self.file_hashes)
    array['message'] = u(self.message)
    return array
Serializes this PassportElementErrorFiles to a dictionary. :return: dictionary representation of this object. :rtype: dict
def parse_action(action, parsed):
    if action == "list":
        list_env()
    elif action == "new":
        new_env(parsed.environment)
    elif action == "remove":
        remove_env(parsed.environment)
    elif action == "show":
        show_env(parsed.environment)
    elif action == "start":
        start_env(parsed.environment, parsed.path)
Parse the action to execute.
def fmt_pairs(obj, indent=4, sort_key=None):
    lengths = [len(x[0]) for x in obj]
    if not lengths:
        return ''
    longest = max(lengths)
    obj = sorted(obj, key=sort_key)
    formatter = '%s{: <%d} {}' % (' ' * indent, longest)
    string = '\n'.join([formatter.format(k, v) for k, v in obj])
    return string
Format and sort a list of pairs, usually for printing.

If sort_key is provided, the value will be passed as the 'key' keyword
argument of the sorted() function when sorting the items. This allows for
the input such as [('A', 3), ('B', 5), ('Z', 1)] to be sorted by the ints
but formatted like so:

    l = [('A', 3), ('B', 5), ('Z', 1)]
    print(fmt_pairs(l, sort_key=lambda x: x[1]))
        Z 1
        A 3
        B 5

where the default behavior would be:

    print(fmt_pairs(l))
        A 3
        B 5
        Z 1