Columns: code (string, lengths 51–2.38k) · docstring (string, lengths 4–15.2k)
def set_id(self, identifier):
    self._id = identifier
    refobj = self.get_refobj()
    if refobj:
        self.get_refobjinter().set_id(refobj, identifier)
Set the id of the given reftrack This will set the id on the refobject :param identifier: the identifier number :type identifier: int :returns: None :rtype: None :raises: None
def transition_to_execute_complete(self):
    assert self.state in [AQStateMachineStates.execute]
    self.state = AQStateMachineStates.execute_complete
Transition to execute complete
def remove_user(config, group, username):
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    try:
        group_api.remove_user(group, username)
    except ldap_tools.exceptions.NoGroupsFound:
        print("Group ({}) not found".format(group))
    except ldap_tools.exceptions.TooManyResults:
        print("Query for group ({}) returned multiple results.".format(group))
    except ldap3.NO_SUCH_ATTRIBUTE:
        print("{} does not exist in {}".format(username, group))
Remove specified user from specified group.
def add_sockets(self, sockets: Iterable[socket.socket]) -> None:
    for sock in sockets:
        self._sockets[sock.fileno()] = sock
        self._handlers[sock.fileno()] = add_accept_handler(
            sock, self._handle_connection
        )
Makes this server start accepting connections on the given sockets. The ``sockets`` parameter is a list of socket objects such as those returned by `~tornado.netutil.bind_sockets`. `add_sockets` is typically used in combination with that method and `tornado.process.fork_processes` to provide greater control over the initialization of a multi-process server.
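A minimal usage sketch of that multi-process pattern (assumes ``server`` is an already-constructed ``HTTPServer``; the port and process count are illustrative):

from tornado import ioloop, netutil, process

sockets = netutil.bind_sockets(8888)   # bind the listening sockets first
process.fork_processes(0)              # fork one child per CPU core
server.add_sockets(sockets)            # each child accepts on the shared sockets
ioloop.IOLoop.current().start()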
def get_categories(context, template='zinnia/tags/categories.html'):
    return {'template': template,
            'categories': Category.published.all().annotate(
                count_entries_published=Count('entries')),
            'context_category': context.get('category')}
Return the published categories.
def compute_empirical(cls, X):
    z_left = []
    z_right = []
    L = []
    R = []
    U, V = cls.split_matrix(X)
    N = len(U)
    base = np.linspace(EPSILON, 1.0 - EPSILON, COMPUTE_EMPIRICAL_STEPS)
    for k in range(COMPUTE_EMPIRICAL_STEPS):
        left = sum(np.logical_and(U <= base[k], V <= base[k])) / N
        right = sum(np.logical_and(U >= base[k], V >= base[k])) / N
        if left > 0:
            z_left.append(base[k])
            L.append(left / base[k] ** 2)
        if right > 0:
            z_right.append(base[k])
            R.append(right / (1 - z_right[k]) ** 2)
    return z_left, L, z_right, R
Compute empirical distribution.
def get_thumbnail(file_, geometry_string, **options): return default.backend.get_thumbnail(file_, geometry_string, **options)
A shortcut for the Backend ``get_thumbnail`` method
async def get_tree(self, prefix, *, dc=None, separator=None, watch=None,
                   consistency=None):
    response = await self._read(prefix, dc=dc, recurse=True,
                                separator=separator, watch=watch,
                                consistency=consistency)
    result = response.body
    for data in result:
        data["Value"] = decode_value(data["Value"], data["Flags"])
    return consul(result, meta=extract_meta(response.headers))
Gets all keys with a prefix of Key during the transaction. Parameters: prefix (str): Prefix to fetch separator (str): List only up to a given separator dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: CollectionMeta: where value is a list of values This does not fail the transaction if the Key doesn't exist. Not all keys may be present in the results if ACLs do not permit them to be read.
def check_virtualserver(self, name):
    vs = self.bigIP.LocalLB.VirtualServer
    for v in vs.get_list():
        if v.split('/')[-1] == name:
            return True
    return False
Check to see if a virtual server exists
def readme():
    from livereload import Server
    server = Server()
    server.watch("README.rst", "py cute.py readme_build")
    server.serve(open_url_delay=1, root="build/readme")
Live reload readme
def get_ordered_devices():
    libcudart = get_libcudart()
    devices = {}
    for i in range(0, get_installed_devices()):
        gpu = get_device_properties(i)
        pciBusId = ctypes.create_string_buffer(64)
        libcudart.cudaDeviceGetPCIBusId(ctypes.byref(pciBusId), 64, i)
        full_id = pciBusId.value.decode('utf-8')
        gpu['fullId'] = full_id
        devices[full_id] = gpu
    ordered = []
    i = 0
    for key in sorted(devices):
        devices[key]['id'] = i
        ordered.append(devices[key])
        i += 1
    del libcudart
    return ordered
Default CUDA_DEVICE_ORDER is not compatible with nvidia-docker. Nvidia-Docker is using CUDA_DEVICE_ORDER=PCI_BUS_ID. https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolation
def get_lambda_to_execute(self):
    def y(update_progress_func, cancel_job_func):
        func = import_stringified_func(self.func)
        extrafunckwargs = {}
        args, kwargs = copy.copy(self.args), copy.copy(self.kwargs)
        if self.track_progress:
            extrafunckwargs["update_progress"] = partial(update_progress_func, self.job_id)
        if self.cancellable:
            extrafunckwargs["check_for_cancel"] = partial(cancel_job_func, self.job_id)
        kwargs.update(extrafunckwargs)
        return func(*args, **kwargs)
    return y
return a function that executes the function assigned to this job. If job.track_progress is None (the default), the returned function accepts no argument and simply needs to be called. If job.track_progress is True, an update_progress function is passed in that can be used by the function to provide feedback progress back to the job scheduling system. :return: a function that executes the original function assigned to this job.
def _single_request(self, method, *args, **kwargs):
    _method = self._service
    for item in method.split('.'):
        if method.endswith(item):
            _method = getattr(_method, item)(*args, **kwargs)
        else:
            _method = getattr(_method, item)()
    _method.uri = _method.uri.replace('$ENDPOINT', self._endpoint)
    try:
        return _method.execute(http=self._http)
    except googleapiclient.errors.HttpError as exc:
        response = json.loads(exc.content.decode('utf-8'))['error']
        raise APIError(code=response['code'], message=response['message'], http_error=exc)
Make a single request to the fleet API endpoint Args: method (str): A dot delimited string indicating the method to call. Example: 'Machines.List' *args: Passed directly to the method being called. **kwargs: Passed directly to the method being called. Returns: dict: The response from the method called. Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400
def make_variant(cls, converters, re_opts=None, compiled=False, strict=True):
    assert converters, "REQUIRE: Non-empty list."
    if len(converters) == 1:
        return converters[0]
    if re_opts is None:
        re_opts = cls.default_re_opts
    pattern = r")|(".join([tc.pattern for tc in converters])
    pattern = r"(" + pattern + ")"
    group_count = len(converters)
    for converter in converters:
        group_count += pattern_group_count(converter.pattern)
    if compiled:
        convert_variant = cls.__create_convert_variant_compiled(converters, re_opts, strict)
    else:
        convert_variant = cls.__create_convert_variant(re_opts, strict)
    convert_variant.pattern = pattern
    convert_variant.converters = tuple(converters)
    convert_variant.regex_group_count = group_count
    return convert_variant
Creates a type converter for a number of type converter alternatives. The first matching type converter is used. REQUIRES: type_converter.pattern attribute :param converters: List of type converters as alternatives. :param re_opts: Regular expression options to use (=default_re_opts). :param compiled: Use compiled regexp matcher, if true (=False). :param strict: Enable assertion checks. :return: Type converter function object. .. note:: Works only with named fields in :class:`parse.Parser`. Parser needs group_index delta for unnamed/fixed fields. This is not supported for user-defined types. Otherwise, you need to use :class:`parse_type.parse.Parser` (patched version of the :mod:`parse` module).
def save(self, *args, **kwargs):
    if not self.slug:
        self.slug = slugify(self.build_slug())[:self._meta.get_field("slug").max_length]
    if not self.is_indexed:
        if kwargs is None:
            kwargs = {}
        kwargs["index"] = False
    content = super(Content, self).save(*args, **kwargs)
    index_content_contributions.delay(self.id)
    index_content_report_content_proxy.delay(self.id)
    post_to_instant_articles_api.delay(self.id)
    return content
creates the slug, queues up for indexing and saves the instance :param args: inline arguments (optional) :param kwargs: keyword arguments :return: `bulbs.content.Content`
def add(self, documents, boost=None):
    if not isinstance(documents, list):
        documents = [documents]
    documents = [{'doc': d} for d in documents]
    if boost:
        for d in documents:
            d['boost'] = boost
    self._add_batch.extend(documents)
    if len(self._add_batch) > SOLR_ADD_BATCH:
        self._addFlushBatch()
Adds documents to Solr index documents - Single item or list of items to add
def _templates_match(t, family_file): return t.name == family_file.split(os.sep)[-1].split('_detections.csv')[0]
Return True if a tribe matches a family file path. :type t: Tribe :type family_file: str :return: bool
def make_sized_handler(size, const_values, non_minimal_data_handler):
    const_values = list(const_values)

    def constant_size_opcode_handler(script, pc, verify_minimal_data=False):
        pc += 1
        data = bytes_as_hex(script[pc:pc+size])
        if len(data) < size:
            return pc+1, None
        if verify_minimal_data and data in const_values:
            non_minimal_data_handler("not minimal push of %s" % repr(data))
        return pc+size, data

    return constant_size_opcode_handler
Create a handler for a data opcode that returns literal data of a fixed size.
def graph_repr(self):
    final = re.sub(r"[-+]?\d*\.\d+",
                   lambda x: format(float(x.group(0)), '.2E'),
                   self._expr)
    return "Expression:\\l {}\\l".format(final)
Short repr to use when rendering Pipeline graphs.
def listen_loop(self, address, family, internal=False):
    try:
        sock = eventlet.listen(address, family)
    except socket.error, e:
        if e.errno == errno.EADDRINUSE:
            logging.critical("Cannot listen on (%s, %s): already in use" % (address, family))
            raise
        elif e.errno == errno.EACCES and address[1] <= 1024:
            logging.critical("Cannot listen on (%s, %s) (you might need to launch as root)" % (address, family))
            return
        logging.critical("Cannot listen on (%s, %s): %s" % (address, family, e))
        return
    eventlet.sleep(0.5)
    logging.info("Listening for requests on %s" % (address, ))
    try:
        eventlet.serve(
            sock,
            lambda sock, addr: self.handle(sock, addr, internal),
            concurrency=10000,
        )
    finally:
        sock.close()
Accepts incoming connections.
def update(self, friendly_name=values.unset, voice_fallback_method=values.unset,
           voice_fallback_url=values.unset, voice_method=values.unset,
           voice_status_callback_method=values.unset,
           voice_status_callback_url=values.unset, voice_url=values.unset,
           sip_registration=values.unset, domain_name=values.unset):
    return self._proxy.update(
        friendly_name=friendly_name,
        voice_fallback_method=voice_fallback_method,
        voice_fallback_url=voice_fallback_url,
        voice_method=voice_method,
        voice_status_callback_method=voice_status_callback_method,
        voice_status_callback_url=voice_status_callback_url,
        voice_url=voice_url,
        sip_registration=sip_registration,
        domain_name=domain_name,
    )
Update the DomainInstance :param unicode friendly_name: A string to describe the resource :param unicode voice_fallback_method: The HTTP method used with voice_fallback_url :param unicode voice_fallback_url: The URL we should call when an error occurs in executing TwiML :param unicode voice_method: The HTTP method we should use with voice_url :param unicode voice_status_callback_method: The HTTP method we should use to call voice_status_callback_url :param unicode voice_status_callback_url: The URL that we should call to pass status updates :param unicode voice_url: The URL we should call when receiving a call :param bool sip_registration: Whether SIP registration is allowed :param unicode domain_name: The unique address on Twilio to route SIP traffic :returns: Updated DomainInstance :rtype: twilio.rest.api.v2010.account.sip.domain.DomainInstance
def get_all_manifests(image, registry, insecure=False, dockercfg_path=None,
                      versions=('v1', 'v2', 'v2_list')):
    digests = {}
    registry_session = RegistrySession(registry, insecure=insecure,
                                       dockercfg_path=dockercfg_path)
    for version in versions:
        response, _ = get_manifest(image, registry_session, version)
        if response:
            digests[version] = response
    return digests
Return manifest digests for image. :param image: ImageName, the remote image to inspect :param registry: str, URI for registry, if URI schema is not provided, https:// will be used :param insecure: bool, when True registry's cert is not verified :param dockercfg_path: str, dirname of .dockercfg location :param versions: tuple, for which manifest schema versions to fetch manifests :return: dict of successful responses, with versions as keys
def run(self, field=None, simple=False, force=False):
    if field is None:
        fields = [1, 2]
    else:
        fields = [field]
    for filenames in self.filenames.compress(~self.filenames.mask['catalog']).data:
        infile = filenames['catalog']
        for f in fields:
            outfile = filenames['mask_%i' % f]
            if os.path.exists(outfile) and not force:
                logger.info("Found %s; skipping..." % outfile)
                continue
            pixels, maglims = self.calculate(infile, f, simple)
            logger.info("Creating %s" % outfile)
            outdir = mkdir(os.path.dirname(outfile))
            data = odict()
            data['PIXEL'] = pixels
            data['MAGLIM'] = maglims.astype('f4')
            ugali.utils.healpix.write_partial_map(outfile, data, self.nside_pixel)
Loop through pixels containing catalog objects and calculate the magnitude limit. This gets a bit convoluted due to all the different pixel resolutions...
def _format_linedata(linedata, indent, indent_width):
    lines = []
    WIDTH = 78 - indent_width
    SPACING = 2
    NAME_WIDTH_LOWER_BOUND = 13
    NAME_WIDTH_UPPER_BOUND = 30
    NAME_WIDTH = max([len(s) for s, d in linedata])
    if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
        NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
    elif NAME_WIDTH > NAME_WIDTH_UPPER_BOUND:
        NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
    DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
    for namestr, doc in linedata:
        line = indent + namestr
        if len(namestr) <= NAME_WIDTH:
            line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
        else:
            lines.append(line)
            line = indent + ' ' * (NAME_WIDTH + SPACING)
        line += _summarize_doc(doc, DOC_WIDTH)
        lines.append(line.rstrip())
    return lines
Format specific linedata into a pleasant layout. "linedata" is a list of 2-tuples of the form: (<item-display-string>, <item-docstring>) "indent" is a string to use for one level of indentation "indent_width" is a number of columns by which the formatted data will be indented when printed. The <item-display-string> column is held to 30 columns.
def volume_attach(self, name, server_name, device='/dev/xvdb', timeout=300):
    try:
        volume = self.volume_show(name)
    except KeyError as exc:
        raise SaltCloudSystemExit('Unable to find {0} volume: {1}'.format(name, exc))
    server = self.server_by_name(server_name)
    response = self.compute_conn.volumes.create_server_volume(
        server.id,
        volume['id'],
        device=device
    )
    trycount = 0
    start = time.time()
    while True:
        trycount += 1
        try:
            response = self._volume_get(volume['id'])
            if response['status'] == 'in-use':
                return response
        except Exception as exc:
            log.debug('Volume is attaching: %s', name)
            time.sleep(1)
            if time.time() - start > timeout:
                log.error('Timed out after %s seconds '
                          'while waiting for data', timeout)
                return False
            log.debug(
                'Retrying volume_show() (try %s)', trycount
            )
Attach a block device
def _simpleparsefun(date):
    if hasattr(date, 'year'):
        return date
    try:
        date = datetime.datetime.strptime(date, '%Y-%m-%d')
    except ValueError:
        date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
    return date
Simple date parsing function
def compute_csets_dTRAM(connectivity, count_matrices, nn=None, callback=None):
    if connectivity == 'post_hoc_RE' or connectivity == 'BAR_variance':
        raise Exception('Connectivity type %s not supported for dTRAM data.' % connectivity)
    state_counts = _np.maximum(count_matrices.sum(axis=1), count_matrices.sum(axis=2))
    return _compute_csets(
        connectivity, state_counts, count_matrices, None, None, None,
        nn=nn, callback=callback)
r""" Computes the largest connected sets for dTRAM data. Parameters ---------- connectivity : string one 'reversible_pathways', 'neighbors', 'summed_count_matrix' or None. Selects the algorithm for measuring overlap between thermodynamic and Markov states. * 'reversible_pathways' : requires that every state in the connected set can be reached by following a pathway of reversible transitions. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). A pathway of reversible transitions is a list of reversible transitions [(i_1, i_2), (i_2, i_3),..., (i_(N-2), i_(N-1)), (i_(N-1), i_N)]. The thermodynamic state where the reversible transitions happen, is ignored in constructing the reversible pathways. This is equivalent to assuming that two ensembles overlap at some Markov state whenever there exist frames from both ensembles in that Markov state. * 'largest' : alias for reversible_pathways * 'neighbors' : similar to 'reversible_pathways' but with a more strict requirement for the overlap between thermodynamic states. It is required that every state in the connected set can be reached by following a pathway of reversible transitions or jumping between overlapping thermodynamic states while staying in the same Markov state. A reversible transition between two Markov states (within the same thermodynamic state k) is a pair of Markov states that belong to the same strongly connected component of the count matrix (from thermodynamic state k). It is assumed that the data comes from an Umbrella sampling simulation and the number of the thermodynamic state matches the position of the Umbrella along the order parameter. The overlap of thermodynamic states k and l within Markov state n is set according to the value of nn; if there are samples in both product-space states (k,n) and (l,n) and |l-n|<=nn, the states are overlapping. * 'summed_count_matrix' : all thermodynamic states are assumed to overlap. The connected set is then computed by summing the count matrices over all thermodynamic states and taking it's largest strongly connected set. Not recommended! * None : assume that everything is connected. For debugging. count_matrices : numpy.ndarray((T, M, M)) Count matrices for all T thermodynamic states. nn : int or None, optional Number of neighbors that are assumed to overlap when connectivity='neighbors' Returns ------- csets, projected_cset csets : list of numpy.ndarray((M_prime_k,), dtype=int) List indexed by thermodynamic state. Every element csets[k] is the largest connected set at thermodynamic state k. projected_cset : numpy.ndarray(M_prime, dtype=int) The overall connected set. This is the union of the individual connected sets of the thermodynamic states.
def _get_subnets_table(subnets):
    table = formatting.Table(['id', 'network identifier', 'cidr', 'note'])
    for subnet in subnets:
        table.add_row([subnet.get('id', ''),
                       subnet.get('networkIdentifier', ''),
                       subnet.get('cidr', ''),
                       subnet.get('note', '')])
    return table
Yields a formatted table to print subnet details. :param List[dict] subnets: List of subnets. :return Table: Formatted for subnet output.
def delete(name, config=None):
    storm_ = get_storm_instance(config)
    try:
        storm_.delete_entry(name)
        print(
            get_formatted_message(
                'hostname "{0}" deleted successfully.'.format(name),
                'success')
        )
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
Deletes a single host.
def combine_adjacent_lines(line_numbers):
    combine_template = "{0}-{1}"
    combined_list = []
    line_numbers.append(None)
    start = line_numbers[0]
    end = None
    for line_number in line_numbers[1:]:
        if (end if end else start) + 1 == line_number:
            end = line_number
        else:
            if end:
                combined_list.append(combine_template.format(start, end))
            else:
                combined_list.append(str(start))
            start = line_number
            end = None
    return combined_list
Given a sorted collection of line numbers this will turn them to strings and combine adjacent values [1, 2, 5, 6, 100] -> ["1-2", "5-6", "100"]
def get_cgi_parameter_list(form: cgi.FieldStorage, key: str) -> List[str]: return form.getlist(key)
Extracts a list of values, all with the same key, from a CGI form.
def swap(self, position: int) -> None:
    idx = -1 * position - 1
    try:
        self.values[-1], self.values[idx] = self.values[idx], self.values[-1]
    except IndexError:
        raise InsufficientStack("Insufficient stack items for SWAP{0}".format(position))
Perform a SWAP operation on the stack.
def get_next_scheduled_time(cron_string):
    itr = croniter.croniter(cron_string, datetime.utcnow())
    return itr.get_next(datetime)
Calculate the next scheduled time by creating a crontab object with a cron string
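For example, with the croniter API used above (the dates shown are illustrative):

from datetime import datetime
import croniter

# Next run of a job scheduled every day at 09:30 UTC
itr = croniter.croniter('30 9 * * *', datetime.utcnow())
print(itr.get_next(datetime))  # e.g. 2024-01-02 09:30:00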
def _sendFiles(
    self, files, message=None, thread_id=None, thread_type=ThreadType.USER
):
    thread_id, thread_type = self._getThread(thread_id, thread_type)
    data = self._getSendData(
        message=self._oldMessage(message),
        thread_id=thread_id,
        thread_type=thread_type,
    )
    data["action_type"] = "ma-type:user-generated-message"
    data["has_attachment"] = True
    for i, (file_id, mimetype) in enumerate(files):
        data["{}s[{}]".format(mimetype_to_key(mimetype), i)] = file_id
    return self._doSendRequest(data)
Sends files from file IDs to a thread `files` should be a list of tuples, with a file's ID and mimetype
def mount(self, mountpoint, app, into_worker=False):
    self._set('worker-mount' if into_worker else 'mount',
              '%s=%s' % (mountpoint, app), multi=True)
    return self._section
Load application under mountpoint. Example: * .mount('', 'app0.py') -- Root URL part * .mount('/app1', 'app1.py') -- URL part * .mount('/pinax/here', '/var/www/pinax/deploy/pinax.wsgi') * .mount('the_app3', 'app3.py') -- Variable value: application alias (can be set by ``UWSGI_APPID``) * .mount('example.com', 'app2.py') -- Variable value: Hostname (variable set in nginx) * http://uwsgi-docs.readthedocs.io/en/latest/Nginx.html#hosting-multiple-apps-in-the-same-process-aka-managing-script-name-and-path-info :param str|unicode mountpoint: URL part, or variable value. .. note:: In case of URL part you may also want to set ``manage_script_name`` basic param to ``True``. .. warning:: In case of URL part a trailing slash may case problems in some cases (e.g. with Django based projects). :param str|unicode app: App module/file. :param bool into_worker: Load application under mountpoint in the specified worker or after workers spawn.
def get_objectives_by_ids(self, objective_ids):
    collection = JSONClientValidated('learning',
                                     collection='Objective',
                                     runtime=self._runtime)
    object_id_list = []
    for i in objective_ids:
        object_id_list.append(ObjectId(self._get_id(i, 'learning').get_identifier()))
    result = collection.find(
        dict({'_id': {'$in': object_id_list}},
             **self._view_filter()))
    result = list(result)
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.ObjectiveList(sorted_result, runtime=self._runtime, proxy=self._proxy)
Gets an ``ObjectiveList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the objectives specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible ``Objectives`` may be omitted from the list and may present the elements in any order including returning a unique set. arg: objective_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.learning.ObjectiveList) - the returned ``Objective`` list raise: NotFound - an ``Id was`` not found raise: NullArgument - ``objective_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def plot_counts(df, theme):
    dates, counts = df['date-observation'], df[theme + "_count"]
    fig, ax = plt.subplots()
    ax.set_ylabel("{} pixel counts".format(" ".join(theme.split("_"))))
    ax.set_xlabel("observation date")
    ax.plot(dates, counts, '.')
    fig.autofmt_xdate()
    plt.show()
plot the counts of a given theme from a created database over time
def get_all(self):
    components = []
    self._lock.acquire()
    try:
        for reference in self._references:
            components.append(reference.get_component())
    finally:
        self._lock.release()
    return components
Gets all component references registered in this reference map. :return: a list with component references.
def enable_thread_safety(self):
    if self.threadsafe:
        return
    if self._running.isSet():
        raise RuntimeError('Cannot enable thread safety after start')

    def _getattr(obj, name):
        return getattr(obj, name, False) is True

    for name in dir(self):
        try:
            meth = getattr(self, name)
        except AttributeError:
            pass
        if not callable(meth):
            continue
        make_threadsafe = _getattr(meth, 'make_threadsafe')
        make_threadsafe_blocking = _getattr(meth, 'make_threadsafe_blocking')
        if make_threadsafe:
            assert not make_threadsafe_blocking
            meth = self._make_threadsafe(meth)
            setattr(self, name, meth)
        elif make_threadsafe_blocking:
            meth = self._make_threadsafe_blocking(meth)
            setattr(self, name, meth)
    self._threadsafe = True
Enable thread-safety features. Must be called before start().
def head(self, n=5):
    col = self.copy()
    col.query.setLIMIT(n)
    return col.toPandas()
Returns first n rows
def _hm_form_message(
    self, thermostat_id, protocol, source, function, start, payload
):
    if protocol == constants.HMV3_ID:
        start_low = (start & constants.BYTEMASK)
        start_high = (start >> 8) & constants.BYTEMASK
        if function == constants.FUNC_READ:
            payload_length = 0
            length_low = (constants.RW_LENGTH_ALL & constants.BYTEMASK)
            length_high = (constants.RW_LENGTH_ALL >> 8) & constants.BYTEMASK
        else:
            payload_length = len(payload)
            length_low = (payload_length & constants.BYTEMASK)
            length_high = (payload_length >> 8) & constants.BYTEMASK
        msg = [
            thermostat_id, 10 + payload_length, source, function,
            start_low, start_high, length_low, length_high
        ]
        if function == constants.FUNC_WRITE:
            msg = msg + payload
            type(msg)
        return msg
    else:
        assert 0, "Un-supported protocol found %s" % protocol
Forms a message payload, excluding CRC
def install(pkg, channel=None, refresh=False):
    args = []
    ret = {'result': None, 'output': ""}
    if refresh:
        cmd = 'refresh'
    else:
        cmd = 'install'
    if channel:
        args.append('--channel=' + channel)
    try:
        ret['output'] = subprocess.check_output(
            [SNAP_BINARY_NAME, cmd, pkg] + args, stderr=subprocess.STDOUT)
        ret['result'] = True
    except subprocess.CalledProcessError as e:
        ret['output'] = e.output
        ret['result'] = False
    return ret
Install the specified snap package from the specified channel. Returns a dictionary of "result" and "output". pkg The snap package name channel Optional. The snap channel to install from, eg "beta" refresh : False If True, use "snap refresh" instead of "snap install". This allows changing the channel of a previously installed package.
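An illustrative call of the function above (the package name is hypothetical and the snap binary must be available):

result = install('hello-world', channel='beta')
if result['result']:
    print("Installed:", result['output'])
else:
    print("Install failed:", result['output'])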
def add_scene(self, animation_id, name, color, velocity, config):
    if animation_id < 0 or animation_id >= len(self.state.animationClasses):
        err_msg = "Requested to register scene with invalid Animation ID. Out of range."
        logging.info(err_msg)
        return(False, 0, err_msg)
    if self.state.animationClasses[animation_id].check_config(config) is False:
        err_msg = "Requested to register scene with invalid configuration."
        logging.info(err_msg)
        return(False, 0, err_msg)
    self.state.sceneIdCtr += 1
    self.state.scenes[self.state.sceneIdCtr] = Scene(animation_id, name, color, velocity, config)
    sequence_number = self.zmq_publisher.publish_scene_add(
        self.state.sceneIdCtr, animation_id, name, color, velocity, config)
    logging.debug("Registered new scene.")
    if self.state.activeSceneId is None:
        self.set_scene_active(self.state.sceneIdCtr)
    return (True, sequence_number, "OK")
Add a new scene, returns Scene ID
def sid(self):
    pnames = list(self.terms) + list(self.dterms)
    pnames.sort()
    return (self.__class__,
            tuple([(k, id(self.__dict__[k])) for k in pnames if k in self.__dict__]))
Semantic id.
def email_addresses595(self, key, value):
    emails = self.get('email_addresses', [])
    if value.get('o'):
        emails.append({
            'value': value.get('o'),
            'current': False,
            'hidden': True,
        })
    if value.get('m'):
        emails.append({
            'value': value.get('m'),
            'current': True,
            'hidden': True,
        })
    notes = self.get('_private_notes', [])
    new_note = (
        {
            'source': value.get('9'),
            'value': _private_note,
        }
        for _private_note in force_list(value.get('a'))
    )
    notes.extend(new_note)
    self['_private_notes'] = notes
    return emails
Populates the ``email_addresses`` field using the 595 MARCXML field. Also populates ``_private_notes`` as a side effect.
def send_signals(self):
    if self.flag:
        invalid_ipn_received.send(sender=self)
        return
    else:
        valid_ipn_received.send(sender=self)
Shout for the world to hear whether a txn was successful.
def _is_valid_cardinal(self, inpt, metadata):
    if not isinstance(inpt, int):
        return False
    if metadata.get_minimum_cardinal() and inpt < metadata.get_minimum_cardinal():
        return False
    if metadata.get_maximum_cardinal() and inpt > metadata.get_maximum_cardinal():
        return False
    if metadata.get_cardinal_set() and inpt not in metadata.get_cardinal_set():
        return False
    else:
        return True
Checks if input is a valid cardinal value
def export_as_file(self, file_path, cv_source):
    if os.path.exists(file_path):
        raise exceptions.UserError('{} already exists'.format(file_path))
    with open(file_path, 'wb') as f:
        f.write(self.export_as_code(cv_source).encode('utf8'))
Export the ensemble as a single Python file and saves it to `file_path`. This is EXPERIMENTAL as putting different modules together would probably wreak havoc especially on modules that make heavy use of global variables. Args: file_path (str, unicode): Absolute/local path of place to save file in cv_source (str, unicode): String containing actual code for base learner cross-validation used to generate secondary meta-features.
def pause(self):
    if self._end_time is not None:
        return
    self._end_time = datetime.datetime.now()
    self._elapsed_time += self._end_time - self._start_time
Pause the stopwatch. If the stopwatch is already paused, nothing will happen.
def generate(cache_fn):
    if not os.path.exists(cache_fn):
        print >> sys.stderr, "Can't access `%s`!" % cache_fn
        sys.exit(1)
    with SqliteDict(cache_fn) as db:
        for item in _pick_keywords(db):
            yield item
Go thru `cache_fn` and filter keywords. Store them in `keyword_list.json`. Args: cache_fn (str): Path to the file with cache. Returns: list: List of :class:`KeywordInfo` objects.
def get_all_publications(return_namedtuples=True):
    sources = [
        ben_cz.get_publications,
        grada_cz.get_publications,
        cpress_cz.get_publications,
        zonerpress_cz.get_publications,
    ]
    publications = []
    for source in sources:
        publications.extend(
            filters.filter_publications(source())
        )
    if return_namedtuples:
        publications = map(lambda x: x.to_namedtuple(), publications)
    return publications
Get list publications from all available source. Args: return_namedtuples (bool, default True): Convert :class:`.Publication` structures to namedtuples (used in AMQP communication). Returns: list: List of :class:`.Publication` structures converted to namedtuple.
def GetIcmpStatistics():
    statistics = MIB_ICMP()
    _GetIcmpStatistics(byref(statistics))
    results = _struct_to_dict(statistics)
    del(statistics)
    return results
Return all Windows ICMP stats from iphlpapi
def poisson(data):
    data = np.hstack(([0.0], np.array(data)))
    cumm = np.cumsum(data)

    def cost(s, t):
        diff = cumm[t] - cumm[s]
        if diff == 0:
            return -2 * diff * (- np.log(t - s) - 1)
        else:
            return -2 * diff * (np.log(diff) - np.log(t - s) - 1)

    return cost
Creates a segment cost function for a time series with a poisson distribution with changing mean Args: data (:obj:`list` of float): 1D time series data Returns: function: Function with signature (int, int) -> float where the first arg is the starting index, and the second is the ending index. Returns the cost of that segment
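A small self-contained sketch of how the returned cost function might be used (the counts are made-up data):

import numpy as np

counts = [3, 1, 4, 1, 5, 9, 2, 6]   # hypothetical event counts per time step
cost = poisson(counts)
print(cost(0, 4))   # cost of treating steps 0..3 as one segment
print(cost(4, 8))   # cost of treating steps 4..7 as one segment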
def sense_dep(self, target):
    if target.atr_req[15] & 0x30 == 0x30:
        self.log.warning("must reduce the max payload size in atr_req")
        target.atr_req[15] = (target.atr_req[15] & 0xCF) | 0x20
    target = super(Device, self).sense_dep(target)
    if target is None:
        return
    if target.atr_res[16] & 0x30 == 0x30:
        self.log.warning("must reduce the max payload size in atr_res")
        target.atr_res[16] = (target.atr_res[16] & 0xCF) | 0x20
    return target
Search for a DEP Target in active communication mode. Because the PN531 does not implement the extended frame syntax for host controller communication, it can not support the maximum payload size of 254 byte. The driver handles this by modifying the length-reduction values in atr_req and atr_res.
def insert(self, value):
    if not self.payload or value == self.payload:
        self.payload = value
    else:
        if value <= self.payload:
            if self.left:
                self.left.insert(value)
            else:
                self.left = BinaryTreeNode(value)
        else:
            if self.right:
                self.right.insert(value)
            else:
                self.right = BinaryTreeNode(value)
Insert a value in the tree
def get_authorization_url(self, client_id=None, instance_id=None,
                          redirect_uri=None, region=None, scope=None,
                          state=None):
    client_id = client_id or self.client_id
    instance_id = instance_id or self.instance_id
    redirect_uri = redirect_uri or self.redirect_uri
    region = region or self.region
    scope = scope or self.scope
    state = state or str(uuid.uuid4())
    self.state = state
    return Request(
        'GET',
        self.auth_base_url,
        params={
            'client_id': client_id,
            'instance_id': instance_id,
            'redirect_uri': redirect_uri,
            'region': region,
            'response_type': 'code',
            'scope': scope,
            'state': state
        }
    ).prepare().url, state
Generate authorization URL. Args: client_id (str): OAuth2 client ID. Defaults to ``None``. instance_id (str): App Instance ID. Defaults to ``None``. redirect_uri (str): Redirect URI. Defaults to ``None``. region (str): App Region. Defaults to ``None``. scope (str): Permissions. Defaults to ``None``. state (str): UUID to detect CSRF. Defaults to ``None``. Returns: str, str: Auth URL, state
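A hedged usage sketch; ``client`` stands in for an object constructed with the relevant OAuth2 settings, and the URLs and scope are placeholders:

url, state = client.get_authorization_url(
    redirect_uri='https://example.com/callback',
    scope='read write',
)
print("Send the user to:", url)
# keep `state` and compare it with the value echoed back on the redirect (CSRF check)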
def validate_regexp(pattern, flags=0):
    regex = re.compile(pattern, flags) if isinstance(pattern, str) else pattern

    def regexp_validator(field, data):
        if field.value is None:
            return
        if regex.match(str(field.value)) is None:
            raise ValidationError('regexp', pattern=pattern)

    return regexp_validator
Validate the field matches the given regular expression. :param pattern: Regular expression to match. String or regular expression instance. :param flags: Flags for the regular expression. :raises: ``ValidationError('regexp')``
def multiple_files_count_reads_in_windows(bed_files, args):
    bed_windows = OrderedDict()
    for bed_file in bed_files:
        logging.info("Binning " + bed_file)
        if ".bedpe" in bed_file:
            chromosome_dfs = count_reads_in_windows_paired_end(bed_file, args)
        else:
            chromosome_dfs = count_reads_in_windows(bed_file, args)
        bed_windows[bed_file] = chromosome_dfs
    return bed_windows
Use count_reads on multiple files and store the result in a dict. Untested since it does the same thing as count_reads.
def shortcut_app_id(shortcut):
    algorithm = Crc(width=32, poly=0x04C11DB7, reflect_in=True,
                    xor_in=0xffffffff, reflect_out=True, xor_out=0xffffffff)
    crc_input = ''.join([shortcut.exe, shortcut.name])
    high_32 = algorithm.bit_by_bit(crc_input) | 0x80000000
    full_64 = (high_32 << 32) | 0x02000000
    return str(full_64)
Generates the app id for a given shortcut. Steam uses app ids as a unique identifier for games, but since shortcuts don't have a canonical server-side representation they need to be generated on the fly. The important part about this function is that it will generate the same app id as Steam does for a given shortcut
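The CRC parameters above are those of standard CRC-32, so the same id can be sketched with the standard library; this is an illustrative equivalent (assuming UTF-8 input), not the project's code:

import zlib
from collections import namedtuple

Shortcut = namedtuple('Shortcut', ['exe', 'name'])  # minimal stand-in

def shortcut_app_id_sketch(shortcut):
    crc_input = (shortcut.exe + shortcut.name).encode('utf-8')
    high_32 = zlib.crc32(crc_input) | 0x80000000
    return str((high_32 << 32) | 0x02000000)

print(shortcut_app_id_sketch(Shortcut('"C:\\Games\\doom.exe"', 'Doom')))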
def http_session(cookies=None):
    session = requests.Session()
    if cookies is not False:
        session.cookies.update(cookies or cookiejar())
    session.headers.update({'User-Agent': 'ipsv/{v}'.format(v=ips_vagrant.__version__)})
    return session
Generate a Requests session @param cookies: Cookies to load. None loads the app default CookieJar. False disables cookie loading. @type cookies: dict, cookielib.LWPCookieJar, None or False @rtype requests.Session
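Typical calls, following the cookie semantics described above (cookie values and URL are illustrative):

session = http_session()                               # load the app default CookieJar
session = http_session(cookies={'session_id': 'abc'})  # explicit cookies
anonymous = http_session(cookies=False)                # no cookies at all
response = anonymous.get('https://example.com')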
def risk_evidence(self, domain, **kwargs): return self._results('risk-evidence', '/v1/risk/evidence/', items_path=('components', ), domain=domain, **kwargs)
Returns back the detailed risk evidence associated with a given domain
def volume_usage(self, year=None, month=None):
    endpoint = '/'.join((
        self.server_url, '_api', 'v2', 'usage', 'data_volume'))
    return self._usage_endpoint(endpoint, year, month)
Retrieves Cloudant volume usage data, optionally for a given year and month. :param int year: Year to query against, for example 2014. Optional parameter. Defaults to None. If used, it must be accompanied by ``month``. :param int month: Month to query against that must be an integer between 1 and 12. Optional parameter. Defaults to None. If used, it must be accompanied by ``year``. :returns: Volume usage data in JSON format
def apply(self, styles=None, verbose=False):
    PARAMS = set_param(["styles"], [styles])
    response = api(url=self.__url + "/apply", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
Applies the specified style to the selected views and returns the SUIDs of the affected views. :param styles (string): Name of Style to be applied to the selected views. = ['Directed', 'BioPAX_SIF', 'Bridging Reads Histogram:unique_0', 'PSIMI 25 Style', 'Coverage Histogram:best&unique', 'Minimal', 'Bridging Reads Histogram:best&unique_0', 'Coverage Histogram_0', 'Big Labels', 'No Histogram:best&unique_0', 'Bridging Reads Histogram:best', 'No Histogram_0', 'No Histogram:best&unique', 'Bridging Reads Histogram_0', 'Ripple', 'Coverage Histogram:unique_0', 'Nested Network Style', 'Coverage Histogram:best', 'Coverage Histogram:best&unique_0', 'default black', 'No Histogram:best_0', 'No Histogram:unique', 'No Histogram:unique_0', 'Solid', 'Bridging Reads Histogram:unique', 'No Histogram:best', 'Coverage Histogram', 'BioPAX', 'Bridging Reads Histogram', 'Coverage Histogram:best_0', 'Sample1', 'Universe', 'Bridging Reads Histogram:best_0', 'Coverage Histogram:unique', 'Bridging Reads Histogram:best&unique', 'No Histogram', 'default'] :param verbose: print more :returns: SUIDs of the affected views
def all_domain_events(self):
    for originator_id in self.record_manager.all_sequence_ids():
        for domain_event in self.get_domain_events(originator_id=originator_id, page_size=100):
            yield domain_event
Yields all domain events in the event store.
def setup_handler(context):
    if context.readDataFile('senaite.lims.txt') is None:
        return
    logger.info("SENAITE setup handler [BEGIN]")
    portal = context.getSite()
    setup_html_filter(portal)
    logger.info("SENAITE setup handler [DONE]")
Generic setup handler
def unicode_name(self, name, in_group=False):
    value = ord(_unicodedata.lookup(name))
    if (self.is_bytes and value > 0xFF):
        value = ""
    if not in_group and value == "":
        return '[^%s]' % ('\x00-\xff' if self.is_bytes else _uniprops.UNICODE_RANGE)
    elif value == "":
        return value
    else:
        return ['\\%03o' % value if value <= 0xFF else chr(value)]
Insert Unicode value by its name.
def get_related_model(field):
    model = None
    if hasattr(field, 'related_model') and field.related_model:
        model = field.related_model
    elif hasattr(field, 'rel') and field.rel:
        model = field.rel.to
    return model
Gets the related model from a related field
def GetUnclaimedCoins(self):
    unclaimed = []
    neo = Blockchain.SystemShare().Hash
    for coin in self.GetCoins():
        if coin.Output.AssetId == neo and \
                coin.State & CoinState.Confirmed > 0 and \
                coin.State & CoinState.Spent > 0 and \
                coin.State & CoinState.Claimed == 0 and \
                coin.State & CoinState.Frozen == 0 and \
                coin.State & CoinState.WatchOnly == 0:
            unclaimed.append(coin)
    return unclaimed
Gets coins in the wallet that have not been 'claimed', or redeemed for their gas value on the blockchain. Returns: list: a list of ``neo.Wallet.Coin`` that have 'claimable' value
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
    cacher = getattr(self, '_cacher', None)
    if cacher is not None:
        ref = cacher[1]()
        if ref is None:
            del self._cacher
        else:
            try:
                ref._maybe_cache_changed(cacher[0], self)
            except Exception:
                pass
    if verify_is_copy:
        self._check_setitem_copy(stacklevel=5, t='referant')
    if clear:
        self._clear_item_cache()
See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : boolean, default False clear the item cache verify_is_copy : boolean, default True provide is_copy checks
def setup(
    cls,
    app_version: str,
    app_name: str,
    config_file_path: str,
    config_sep_str: str,
    root_path: typing.Optional[typing.List[str]] = None,
):
    cls.app_version = app_version
    cls.app_name = app_name
    cls.config_file_path = config_file_path
    cls.config_sep_str = config_sep_str
    cls.root_path = root_path
Configures elib_config in one fell swoop :param app_version: version of the application :param app_name: name of the application :param config_file_path: path to the config file to use :param config_sep_str: separator for config values paths :param root_path: list of strings that will be pre-pended to *all* config values paths (useful to setup a prefix for the whole app)
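A hypothetical startup call (the owning class name is assumed here; the values are placeholders):

ELIBConfig.setup(
    app_version='1.0.0',
    app_name='my-app',
    config_file_path='my-app.toml',
    config_sep_str='__',
)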
def commit_hash(dir='.'):
    cmd = ['git', 'rev-parse', 'HEAD']
    try:
        with open(os.devnull, 'w') as devnull:
            revision_hash = subprocess.check_output(
                cmd, cwd=dir, stderr=devnull
            )
        if sys.version_info.major > 2:
            revision_hash = revision_hash.decode('ascii')
        return revision_hash.strip()
    except subprocess.CalledProcessError:
        return None
Return commit hash for HEAD of checked out branch of the specified directory.
def from_module_dict(cls, environment, module_dict, globals): return cls._from_namespace(environment, module_dict, globals)
Creates a template object from a module. This is used by the module loader to create a template object. .. versionadded:: 2.4
def cmd_web_tech(url, no_cache, verbose):
    response = web_tech(url, no_cache, verbose)
    print(json.dumps(response, indent=4))
Use Wappalyzer apps.json database to identify technologies used on a web application. Reference: https://github.com/AliasIO/Wappalyzer Note: This tool only sends one request. So, it's stealth and not suspicious. \b $ habu.web.tech https://woocomerce.com { "Nginx": { "categories": [ "Web Servers" ] }, "PHP": { "categories": [ "Programming Languages" ] }, "WooCommerce": { "categories": [ "Ecommerce" ], "version": "6.3.1" }, "WordPress": { "categories": [ "CMS", "Blogs" ] }, }
def config_shortcut(action, context, name, parent):
    keystr = get_shortcut(context, name)
    qsc = QShortcut(QKeySequence(keystr), parent, action)
    qsc.setContext(Qt.WidgetWithChildrenShortcut)
    sc = Shortcut(data=(qsc, context, name))
    return sc
Create a Shortcut namedtuple for a widget The data contained in this tuple will be registered in our shortcuts preferences page
def is_executable_file(path):
    fpath = os.path.realpath(path)
    if not os.path.isfile(fpath):
        return False
    mode = os.stat(fpath).st_mode
    if (sys.platform.startswith('sunos') and os.getuid() == 0):
        return bool(mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
    return os.access(fpath, os.X_OK)
Checks that path is an executable regular file, or a symlink towards one. This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
def ServiceAccountCredentialsFromP12File(
        service_account_name, private_key_filename, scopes, user_agent):
    private_key_filename = os.path.expanduser(private_key_filename)
    scopes = util.NormalizeScopes(scopes)
    if oauth2client.__version__ > '1.5.2':
        credentials = (
            service_account.ServiceAccountCredentials.from_p12_keyfile(
                service_account_name, private_key_filename, scopes=scopes))
        if credentials is not None:
            credentials.user_agent = user_agent
        return credentials
    else:
        with open(private_key_filename, 'rb') as key_file:
            return oauth2client.client.SignedJwtAssertionCredentials(
                service_account_name, key_file.read(), scopes,
                user_agent=user_agent)
Create a new credential from the named .p12 keyfile.
def set_script(self, script):
    dist, entry_point = get_entry_point_from_console_script(script, self._distributions)
    if entry_point:
        self.set_entry_point(entry_point)
        TRACER.log('Set entrypoint to console_script %r in %r' % (entry_point, dist))
        return
    dist, _, _ = get_script_from_distributions(script, self._distributions)
    if dist:
        if self._pex_info.entry_point:
            raise self.InvalidExecutableSpecification('Cannot set both entry point and script of PEX!')
        self._pex_info.script = script
        TRACER.log('Set entrypoint to script %r in %r' % (script, dist))
        return
    raise self.InvalidExecutableSpecification(
        'Could not find script %r in any distribution %s within PEX!' % (
            script, ', '.join(str(d) for d in self._distributions)))
Set the entry point of this PEX environment based upon a distribution script. :param script: The script name as defined either by a console script or ordinary script within the setup.py of one of the distributions added to the PEX. :raises: :class:`PEXBuilder.InvalidExecutableSpecification` if the script is not found in any distribution added to the PEX.
def setup_network_agents(self):
    for i in self.env.G.nodes():
        self.env.G.node[i]['agent'] = self.agent_type(
            environment=self.env,
            agent_id=i,
            state=deepcopy(self.initial_states[i]))
Initializes agents on nodes of graph and registers them to the SimPy environment
def get_logo_url(self, obj):
    if current_app and obj.logo_url:
        return u'{site_url}{path}'.format(
            site_url=current_app.config.get('THEME_SITEURL'),
            path=obj.logo_url,
        )
Get the community logo URL.
def to_bytes(self):
    return struct.pack(Arp._PACKFMT,
                       self._hwtype.value,
                       self._prototype.value,
                       self._hwaddrlen,
                       self._protoaddrlen,
                       self._operation.value,
                       self._senderhwaddr.packed,
                       self._senderprotoaddr.packed,
                       self._targethwaddr.packed,
                       self._targetprotoaddr.packed)
Return packed byte representation of the ARP header.
def failsafe(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        extra_files = []
        try:
            return func(*args, **kwargs)
        except:
            exc_type, exc_val, exc_tb = sys.exc_info()
            traceback.print_exc()
            tb = exc_tb
            while tb:
                filename = tb.tb_frame.f_code.co_filename
                extra_files.append(filename)
                tb = tb.tb_next
            if isinstance(exc_val, SyntaxError):
                extra_files.append(exc_val.filename)
            app = _FailSafeFlask(extra_files)
            app.debug = True

            @app.route('/')
            @app.route('/<path:path>')
            def index(path='/'):
                reraise(exc_type, exc_val, exc_tb)

            return app
    return wrapper
Wraps an app factory to provide a fallback in case of import errors. Takes a factory function to generate a Flask app. If there is an error creating the app, it will return a dummy app that just returns the Flask error page for the exception. This works with the Flask code reloader so that if the app fails during initialization it will still monitor those files for changes and reload the app.
def _ParseRegisteredDLLs(self, parser_mediator, registry_key):
    notify_key = registry_key.GetSubkeyByName('Notify')
    if not notify_key:
        return
    for subkey in notify_key.GetSubkeys():
        for trigger in self._TRIGGERS:
            handler_value = subkey.GetValueByName(trigger)
            if not handler_value:
                continue
            values_dict = {
                'Application': subkey.name,
                'Handler': handler_value.GetDataAsObject(),
                'Trigger': trigger}
            command_value = subkey.GetValueByName('DllName')
            if command_value:
                values_dict['Command'] = command_value.GetDataAsObject()
            event_data = windows_events.WindowsRegistryEventData()
            event_data.key_path = subkey.path
            event_data.offset = subkey.offset
            event_data.regvalue = values_dict
            event_data.source_append = ': Winlogon'
            event = time_events.DateTimeValuesEvent(
                subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Parses the registered DLLs that receive event notifications. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
def cctop_check_status(jobid):
    status = 'http://cctop.enzim.ttk.mta.hu/php/poll.php?jobId={}'.format(jobid)
    status_text = requests.post(status)
    return status_text.text
Check the status of a CCTOP job ID. Args: jobid (str): Job ID obtained when job was submitted Returns: str: 'Finished' if the job is finished and results ready to be downloaded, 'Running' if still in progress, 'Invalid' for any errors.
def copy(self, src, dst, suppress_layouts=False):
    url = '/'.join([src.drive, 'api/copy',
                    str(src.relative_to(src.drive)).rstrip('/')])
    params = {'to': str(dst.relative_to(dst.drive)).rstrip('/'),
              'suppressLayouts': int(suppress_layouts)}
    text, code = self.rest_post(url, params=params, session=src.session,
                                verify=src.verify, cert=src.cert)
    if code not in [200, 201]:
        raise RuntimeError("%s" % text)
Copy artifact from src to dst
def _handle_continue(self, node, scope, ctxt, stream):
    self._dlog("handling continue")
    raise errors.InterpContinue()
Handle continue node :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO
def readout(self):
    elec = self.simulate_poisson_variate()
    elec_pre = self.saturate(elec)
    elec_f = self.pre_readout(elec_pre)
    adu_r = self.base_readout(elec_f)
    adu_p = self.post_readout(adu_r)
    self.clean_up()
    return adu_p
Readout the detector.
def setup(self, steps=None, drop_na=False, **kwargs):
    input_nodes = None
    selectors = self.model.get('input', {}).copy()
    selectors.update(kwargs)
    for i, b in enumerate(self.steps):
        if steps is not None and i not in steps and b.name not in steps:
            continue
        b.setup(input_nodes, drop_na=drop_na, **selectors)
        input_nodes = b.output_nodes
Set up the sequence of steps for analysis. Args: steps (list): Optional list of steps to set up. Each element must be either an int giving the index of the step in the JSON config block list, or a str giving the (unique) name of the step, as specified in the JSON config. Steps that do not match either index or name will be skipped. drop_na (bool): Boolean indicating whether or not to automatically drop events that have a n/a amplitude when reading in data from event files.
def draw(self, k=1, random_state=None):
    random_state = check_random_state(random_state)
    return self.Q.searchsorted(random_state.uniform(0, 1, size=k), side='right')
Returns k draws from q. For each such draw, the value i is returned with probability q[i]. Parameters ----------- k : scalar(int), optional Number of draws to be returned random_state : int or np.random.RandomState, optional Random seed (integer) or np.random.RandomState instance to set the initial state of the random number generator for reproducibility. If None, a randomly initialized RandomState is used. Returns ------- array_like(int) An array of k independent draws from q
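The same inverse-CDF trick in isolation with plain NumPy (the probabilities are made up):

import numpy as np

q = np.array([0.1, 0.6, 0.3])        # probabilities over outcomes 0, 1, 2
Q = np.cumsum(q)                     # cumulative distribution
u = np.random.uniform(0, 1, size=5)  # uniform draws
draws = Q.searchsorted(u, side='right')
print(draws)                         # e.g. [1 1 2 0 1]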
def make_formatter(self, width, padding, alignment, overflow=None):
    if overflow is None:
        overflow = self.overflow_default
    if overflow == 'clip':
        overflower = lambda x: [x.clip(width, self.table.cliptext)]
    elif overflow == 'wrap':
        overflower = lambda x: x.wrap(width)
    elif overflow == 'preformatted':
        overflower = lambda x: x.split('\n')
    else:
        raise RuntimeError("Unexpected overflow mode: %r" % overflow)
    align = self.get_aligner(alignment, width)
    pad = self.get_aligner('center', width + padding)
    return lambda value: [pad(align(x)) for x in overflower(value)]
Create formatter function that factors the width and alignment settings.
def flow_pipe(Diam, HeadLoss, Length, Nu, PipeRough, KMinor):
    if KMinor == 0:
        FlowRate = flow_pipemajor(Diam, HeadLoss, Length, Nu, PipeRough).magnitude
    else:
        FlowRatePrev = 0
        err = 1.0
        FlowRate = min(flow_pipemajor(Diam, HeadLoss, Length, Nu, PipeRough).magnitude,
                       flow_pipeminor(Diam, HeadLoss, KMinor).magnitude)
        while err > 0.01:
            FlowRatePrev = FlowRate
            HLFricNew = (HeadLoss
                         * headloss_fric(FlowRate, Diam, Length, Nu, PipeRough).magnitude
                         / (headloss_fric(FlowRate, Diam, Length, Nu, PipeRough).magnitude
                            + headloss_exp(FlowRate, Diam, KMinor).magnitude))
            FlowRate = flow_pipemajor(Diam, HLFricNew, Length, Nu, PipeRough).magnitude
            if FlowRate == 0:
                err = 0.0
            else:
                err = (abs(FlowRate - FlowRatePrev)
                       / ((FlowRate + FlowRatePrev) / 2))
    return FlowRate
Return the flow in a straight pipe. This function works for both major and minor losses and works whether the flow is laminar or turbulent.
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
    db = kwargs.get('using')
    if db_field.name in self.raw_id_fields:
        kwargs['widget'] = PolymorphicForeignKeyRawIdWidget(
            db_field.rel,
            admin_site=self._get_child_admin_site(db_field.rel),
            using=db
        )
        if 'queryset' not in kwargs:
            queryset = self.get_field_queryset(db, db_field, request)
            if queryset is not None:
                kwargs['queryset'] = queryset
        return db_field.formfield(**kwargs)
    return super(PolymorphicAdminRawIdFix, self).formfield_for_foreignkey(
        db_field, request=request, **kwargs)
Replicates the logic in ModelAdmin.formfield_for_foreignkey, replacing the widget with the patched one above, initialising it with the child admin site.
def _StopAnalysisProcesses(self, abort=False):
    logger.debug('Stopping analysis processes.')
    self._StopMonitoringProcesses()
    if abort:
        self._AbortTerminate()
    if not self._use_zeromq:
        logger.debug('Emptying queues.')
        for event_queue in self._event_queues.values():
            event_queue.Empty()
    for event_queue in self._event_queues.values():
        event_queue.PushItem(plaso_queue.QueueAbort(), block=False)
    self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
    for event_queue in self._event_queues.values():
        event_queue.Close(abort=abort)
    if abort:
        self._AbortKill()
    else:
        self._AbortTerminate()
        self._AbortJoin(timeout=self._PROCESS_JOIN_TIMEOUT)
        for event_queue in self._event_queues.values():
            event_queue.Close(abort=True)
Stops the analysis processes. Args: abort (bool): True to indicate the stop is issued on abort.
def get_value(self):
    from spyder.utils.system import memory_usage
    text = '%d%%' % memory_usage()
    return 'Mem ' + text.rjust(3)
Return memory usage.
def bins(self) -> List[np.ndarray]: return [binning.bins for binning in self._binnings]
List of bin matrices.
def install(name, minimum_version=None, required_version=None, scope=None,
            repository=None):
    flags = [('Name', name)]
    if minimum_version is not None:
        flags.append(('MinimumVersion', minimum_version))
    if required_version is not None:
        flags.append(('RequiredVersion', required_version))
    if scope is not None:
        flags.append(('Scope', scope))
    if repository is not None:
        flags.append(('Repository', repository))
    params = ''
    for flag, value in flags:
        params += '-{0} {1} '.format(flag, value)
    cmd = 'Install-Module {0} -Force'.format(params)
    _pshell(cmd)
    return name in list_modules()
Install a Powershell module from powershell gallery on the system. :param name: Name of a Powershell module :type name: ``str`` :param minimum_version: The minimum version to install, e.g. 1.23.2 :type minimum_version: ``str`` :param required_version: Install a specific version :type required_version: ``str`` :param scope: The scope to install the module to, e.g. CurrentUser, Computer :type scope: ``str`` :param repository: The friendly name of a private repository, e.g. MyRepo :type repository: ``str`` CLI Example: .. code-block:: bash salt 'win01' psget.install PowerPlan
def run(self):
    self.input_channel.basic_consume(self.handle_message,
                                     queue=self.INPUT_QUEUE_NAME,
                                     no_ack=True)
    try:
        self.input_channel.start_consuming()
    except (KeyboardInterrupt, SystemExit):
        log.info(" Exiting")
        self.exit()
Actual consuming of incoming work starts here
def storage_pools(self):
    if not self.__storage_pools:
        self.__storage_pools = StoragePools(self.__connection)
    return self.__storage_pools
Gets the StoragePools API client. Returns: StoragePools:
def unpack(self, unpacker):
    (count, ) = unpacker.unpack_struct(_H)
    items = [(None, None), ]
    count -= 1
    hackpass = False
    for _i in range(0, count):
        if hackpass:
            hackpass = False
            items.append((None, None))
        else:
            item = _unpack_const_item(unpacker)
            items.append(item)
            if item[0] in (CONST_Long, CONST_Double):
                hackpass = True
    self.consts = items
Unpacks the constant pool from an unpacker stream
def generate_reset_password_token(user):
    data = [str(user.id), md5(user.password)]
    return get_serializer("reset").dumps(data)
Generate a unique reset password token for the specified user. :param user: The user to work with
def extend_values(dictionary, key, items):
    values = dictionary.get(key, [])
    try:
        values.extend(items)
    except TypeError:
        raise TypeError('Expected a list, got: %r' % items)
    dictionary[key] = values
Extend the values for that key with the items