code (string lengths: 51 to 2.38k)
docstring (string lengths: 4 to 15.2k)
def create_program_action(parent, text, name, icon=None, nt_name=None):
    if is_text_string(icon):
        icon = get_icon(icon)
    if os.name == 'nt' and nt_name is not None:
        name = nt_name
    path = programs.find_program(name)
    if path is not None:
        return create_action(parent, text, icon=icon,
                             triggered=lambda: programs.run_program(name))
Create action to run a program
def _populate_setup(self): makedirs(os.path.dirname(self._cache_marker)) with codecs.open(self._cache_marker, 'w', encoding='utf-8') as fobj: fobj.write(self.cache_uri) self.graph.open(self.cache_uri)
Just create a local marker file since the actual database should already be created on the Fuseki server.
def rename(self, new_name): old_name = self.name self.name = new_name pypath = self.relative_pythonpath self.root_path = self.root_path[:-len(old_name)]+new_name self.relative_pythonpath = pypath self.save()
Rename project and rename its root path accordingly.
def calculate_sleep_time(attempt, delay_factor=5.0, randomization_factor=.5,
                         max_delay=120):
    if attempt <= 0:
        return 0
    delay = float(2 ** (attempt - 1)) * float(delay_factor)
    delay = delay * (randomization_factor * random.random() + 1)
    return min(delay, max_delay)
Calculate the sleep time between retries, in seconds. Based off of `taskcluster.utils.calculateSleepTime`, but with kwargs instead of constant `delay_factor`/`randomization_factor`/`max_delay`. The taskcluster function generally slept for less than a second, which didn't always get past server issues. Args: attempt (int): the retry attempt number delay_factor (float, optional): a multiplier for the delay time. Defaults to 5. randomization_factor (float, optional): a randomization multiplier for the delay time. Defaults to .5. max_delay (float, optional): the max delay to sleep. Defaults to 120 (seconds). Returns: float: the time to sleep, in seconds.
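A minimal usage sketch for the helper above; `flaky_call` is a hypothetical stand-in for an operation that sometimes fails with a transient error.

import random
import time

def flaky_call():
    # hypothetical operation that fails transiently about half the time
    if random.random() < 0.5:
        raise ConnectionError("transient failure")

for attempt in range(1, 6):
    try:
        flaky_call()
        break
    except ConnectionError:
        # exponential backoff with jitter, capped at max_delay
        time.sleep(calculate_sleep_time(attempt, delay_factor=5.0,
                                        randomization_factor=0.5,
                                        max_delay=120))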
def TSKVolumeGetBytesPerSector(tsk_volume): if hasattr(tsk_volume, 'info') and tsk_volume.info is not None: block_size = getattr(tsk_volume.info, 'block_size', 512) else: block_size = 512 return block_size
Retrieves the number of bytes per sector from a TSK volume object. Args: tsk_volume (pytsk3.Volume_Info): TSK volume information. Returns: int: number of bytes per sector or 512 by default.
def access_key(self):
    credential = self.query_parameters.get(_x_amz_credential)
    if credential is not None:
        credential = url_unquote(credential[0])
    else:
        credential = self.authorization_header_parameters.get(_credential)
        if credential is None:
            raise AttributeError("Credential was not passed in the request")
    try:
        key, scope = credential.split("/", 1)
    except ValueError:
        raise AttributeError("Invalid request credential: %r" % credential)
    if scope != self.credential_scope:
        raise AttributeError("Incorrect credential scope: %r (wanted %r)" %
                             (scope, self.credential_scope))
    return key
The access key id used to sign the request. If the access key is not in the same credential scope as this request, an AttributeError exception is raised.
def _latex_item_to_string(item, *, escape=False, as_content=False): if isinstance(item, pylatex.base_classes.LatexObject): if as_content: return item.dumps_as_content() else: return item.dumps() elif not isinstance(item, str): item = str(item) if escape: item = escape_latex(item) return item
Uses the render method when possible, otherwise falls back to str. Args ---- item: object An object that needs to be converted to a string escape: bool Flag that indicates if escaping is needed as_content: bool Indicates whether the item should be dumped using `~.LatexObject.dumps_as_content` Returns ------- NoEscape Latex
def check_limit(self, limit): if limit > 0: self.limit = limit else: raise ValueError("Rule limit must be strictly > 0 ({0} given)" .format(limit)) return self
Checks if the given limit is valid. A limit must be > 0 to be considered valid. Raises ValueError when the *limit* is not > 0.
def copy_file_if_modified(src_path, dest_path): if os.path.isdir(dest_path): shutil.rmtree(dest_path) must_copy = False if not os.path.exists(dest_path): must_copy = True else: src_stat = os.stat(src_path) dest_stat = os.stat(dest_path) if ((src_stat[stat.ST_SIZE] != dest_stat[stat.ST_SIZE]) or (src_stat[stat.ST_MTIME] != dest_stat[stat.ST_MTIME])): must_copy = True if must_copy: shutil.copy2(src_path, dest_path)
Only copies the file from the source path to the destination path if it doesn't exist yet or it has been modified. Intended to provide something of an optimisation when a project has large trees of assets.
def _extract(archive, compression, cmd, format, verbosity, outdir): targetname = util.get_single_outfile(outdir, archive) try: with lzma.LZMAFile(archive, **_get_lzma_options(format)) as lzmafile: with open(targetname, 'wb') as targetfile: data = lzmafile.read(READ_SIZE_BYTES) while data: targetfile.write(data) data = lzmafile.read(READ_SIZE_BYTES) except Exception as err: msg = "error extracting %s to %s: %s" % (archive, targetname, err) raise util.PatoolError(msg) return None
Extract an LZMA or XZ archive with the lzma Python module.
def _verify_file_size(self): if self._file_size > PartSize.MAXIMUM_OBJECT_SIZE: self._status = TransferState.FAILED raise SbgError('File size = {}b. Maximum file size is {}b'.format( self._file_size, PartSize.MAXIMUM_OBJECT_SIZE) )
Verifies that the file is smaller than 5 TB, which is the maximum allowed for upload.
def _handle_request_exception(self, e):
    handle_func = self._exception_default_handler
    if self.EXCEPTION_HANDLERS:
        for excs, func_name in self.EXCEPTION_HANDLERS.items():
            if isinstance(e, excs):
                handle_func = getattr(self, func_name)
                break
    handle_func(e)
    if not self._finished:
        self.finish()
This method handles HTTPError exceptions the same way Tornado does, and leaves other exceptions to be handled by user-defined handler functions mapped in the class attribute `EXCEPTION_HANDLERS`. Common HTTP status codes: 200 OK 301 Moved Permanently 302 Found 400 Bad Request 401 Unauthorized 403 Forbidden 404 Not Found 405 Method Not Allowed 500 Internal Server Error It is suggested to use only the HTTP status codes above.
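A hypothetical sketch of how `EXCEPTION_HANDLERS` might be declared on a handler subclass; keys can be a single exception class or a tuple, and values name methods on the handler. `BaseAPIHandler` and the method names are stand-ins, not part of the original code.

class MyAPIHandler(BaseAPIHandler):  # BaseAPIHandler is a stand-in base class
    EXCEPTION_HANDLERS = {
        (ValueError, KeyError): '_handle_bad_request',
        PermissionError: '_handle_forbidden',
    }

    def _handle_bad_request(self, error):
        self.set_status(400)
        self.finish({'error': str(error)})

    def _handle_forbidden(self, error):
        self.set_status(403)
        self.finish({'error': 'forbidden'})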
def from_json(json):
    return Point(
        lat=json['lat'],
        lon=json['lon'],
        time=isostr_to_datetime(json['time'])
    )
Creates a Point instance from its JSON representation. Args: json (:obj:`dict`): Must have at least the following keys: lat (float), lon (float), time (string in ISO format). Example: { "lat": 9.3470298, "lon": 3.79274, "time": "2016-07-15T15:27:53.574110" } Returns: :obj:`Point`
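Usage sketch built from the example payload in the docstring above, assuming `from_json` is exposed as a static method on Point.

point = Point.from_json({
    "lat": 9.3470298,
    "lon": 3.79274,
    "time": "2016-07-15T15:27:53.574110",
})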
def main():
    arg_parse = setup_argparse()
    args = arg_parse.parse_args()
    if not args.quiet:
        print('GNS3 Topology Converter')
    if args.debug:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.WARNING
    logging.basicConfig(level=logging_level, format=LOG_MSG_FMT,
                        datefmt=LOG_DATE_FMT)
    logging.getLogger(__name__)
    if args.topology == 'topology.net':
        args.topology = os.path.join(os.getcwd(), 'topology.net')
    topology_files = [{'file': topology_abspath(args.topology),
                       'snapshot': False}]
    topology_files.extend(get_snapshots(args.topology))
    topology_name = name(args.topology, args.name)
    for topology in topology_files:
        do_conversion(topology, topology_name, args.output, args.debug)
Entry point for gns3-converter
def render_template(self, template_name, out_path=None): return render_template(template_name, self.to_dict(), out_path=out_path)
Render a template based on this TileBus Block. The template has access to all of the attributes of this block as a dictionary (the result of calling self.to_dict()). You can optionally render to a file by passing out_path. Args: template_name (str): The name of the template to load. This must be a file in config/templates inside this package out_path (str): An optional path of where to save the output file, otherwise it is just returned as a string. Returns: string: The rendered template data.
def _load_vector_fit(self, fit_key, h5file): vector_fit = [] for i in range(len(h5file[fit_key].keys())): fit_data = self._read_dict(h5file[fit_key]['comp_%d'%i]) vector_fit.append(self._load_scalar_fit(fit_data=fit_data)) return vector_fit
Loads a vector of fits
def PARAMLIMITS(self, value): assert set(value.keys()) == set(self.PARAMLIMITS.keys()), "The \ new parameter limits are not defined for the same set \ of parameters as before." for param in value.keys(): assert value[param][0] < value[param][1], "The new \ minimum value for {0}, {1}, is equal to or \ larger than the new maximum value, {2}"\ .format(param, value[param][0], value[param][1]) self._PARAMLIMITS = value.copy()
Set new `PARAMLIMITS` dictionary.
def generate_configfile(cfg_file, defaults=defaults):
    _mkdir_for_config(cfg_file=cfg_file)
    with open(cfg_file, 'w') as f:
        f.write('')
    for section in defaults.keys():
        set_option(section, cfg_file=cfg_file, **defaults[section])
Write a new nago.ini config file from the defaults. Arguments: cfg_file -- File that is written to like /etc/nago/nago.ini defaults -- Dictionary with default values to use
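A hedged usage sketch; the path, section name and keys below are hypothetical, and each section's dict is forwarded to `set_option` as keyword arguments.

generate_configfile(
    '/tmp/nago.ini',
    defaults={'main': {'host': 'localhost', 'port': '5000'}},
)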
def validate(self, dist): for item in self.remove: if not dist.has_contents_for(item): raise DistutilsSetupError( "%s wants to be able to remove %s, but the distribution" " doesn't contain any packages or modules under %s" % (self.description, item, item) )
Verify that feature makes sense in context of distribution This method is called by the distribution just before it parses its command line. It checks to ensure that the 'remove' attribute, if any, contains only valid package/module names that are present in the base distribution when 'setup()' is called. You may override it in a subclass to perform any other required validation of the feature against a target distribution.
def generate_menu(self, ass, text, path=None, level=0):
    menu = self.create_menu()
    for index, sub in enumerate(sorted(ass[1],
                                       key=lambda y: y[0].fullname.lower())):
        if index != 0:
            text += "|"
        text += "- " + sub[0].fullname
        new_path = list(path)
        if level == 0:
            new_path.append(ass[0].name)
        new_path.append(sub[0].name)
        menu_item = self.menu_item(sub, new_path)
        if sub[1]:
            (sub_menu, txt) = self.generate_menu(sub, text, new_path,
                                                 level=level + 1)
            menu_item.set_submenu(sub_menu)
        menu.append(menu_item)
    return menu, text
Generates a menu based on the ass parameter.
async def listHooks(self, *args, **kwargs): return await self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
List hooks in a given group This endpoint will return a list of all the hook definitions within a given hook group. This method gives output: ``v1/list-hooks-response.json#`` This method is ``stable``
def update_headers(self, headers: Optional[LooseHeaders]) -> None: self.headers = CIMultiDict() netloc = cast(str, self.url.raw_host) if helpers.is_ipv6_address(netloc): netloc = '[{}]'.format(netloc) if not self.url.is_default_port(): netloc += ':' + str(self.url.port) self.headers[hdrs.HOST] = netloc if headers: if isinstance(headers, (dict, MultiDictProxy, MultiDict)): headers = headers.items() for key, value in headers: if key.lower() == 'host': self.headers[key] = value else: self.headers.add(key, value)
Update request headers.
def nic_b(msg):
    tc = typecode(msg)
    if tc < 9 or tc > 18:
        raise RuntimeError(
            "%s: Not an airborne position message, expecting 8<TC<19" % msg)
    msgbin = common.hex2bin(msg)
    nic_b = int(msgbin[39])
    return nic_b
Obtain NICb, navigation integrity category supplement-b Args: msg (string): 28 bytes hexadecimal message string Returns: int: NICb number (0 or 1)
def from_json_str(cls, json_str): return cls.from_json(json.loads(json_str, cls=JsonDecoder))
Convert json string representation into class instance. Args: json_str: json representation as string. Returns: New instance of the class with data loaded from json string.
def symbol_top(body_output, targets, model_hparams, vocab_size):
    del targets
    if model_hparams.shared_embedding_and_softmax_weights:
        scope_name = "shared"
        reuse = tf.AUTO_REUSE
    else:
        scope_name = "softmax"
        reuse = False
    with tf.variable_scope(scope_name, reuse=reuse):
        body_output_shape = common_layers.shape_list(body_output)
        var = get_weights(model_hparams, vocab_size, body_output_shape[-1])
        if (model_hparams.factored_logits and
                model_hparams.mode == tf.estimator.ModeKeys.TRAIN):
            body_output = tf.expand_dims(body_output, 3)
            return common_layers.FactoredTensor(body_output, var)
        else:
            body_output = tf.reshape(body_output, [-1, body_output_shape[-1]])
            logits = tf.matmul(body_output, var, transpose_b=True)
            return tf.reshape(logits,
                              body_output_shape[:-1] + [1, vocab_size])
Generate logits. Args: body_output: A Tensor with shape [batch, p0, p1, model_hparams.hidden_size]. targets: Unused. model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. Returns: logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
def build_projection_kwargs(cls, source, mapping): return cls._map_arg_names(source, cls._default_attr_mapping + mapping)
Handle mapping a dictionary of metadata to keyword arguments.
def scale_up_dynos(id): config = PsiturkConfig() config.load_config() dyno_type = config.get('Server Parameters', 'dyno_type') num_dynos_web = config.get('Server Parameters', 'num_dynos_web') num_dynos_worker = config.get('Server Parameters', 'num_dynos_worker') log("Scaling up the dynos...") subprocess.call( "heroku ps:scale web=" + str(num_dynos_web) + ":" + str(dyno_type) + " --app " + id, shell=True) subprocess.call( "heroku ps:scale worker=" + str(num_dynos_worker) + ":" + str(dyno_type) + " --app " + id, shell=True) clock_on = config.getboolean('Server Parameters', 'clock_on') if clock_on: subprocess.call( "heroku ps:scale clock=1:" + dyno_type + " --app " + id, shell=True)
Scale up the Heroku dynos.
def parse_domains(self, domain, params): domain_id = self.get_non_aws_id(domain['DomainName']) domain['name'] = domain.pop('DomainName') self.domains[domain_id] = domain
Parse a single Route53Domains domain
def album(self, album_id, *, include_description=True, include_songs=True): response = self._call( mc_calls.FetchAlbum, album_id, include_description=include_description, include_tracks=include_songs ) album_info = response.body return album_info
Get information about an album. Parameters: album_id (str): An album ID. Album IDs start with a 'B'. include_description (bool, Optional): Include description of the album in the returned dict. include_songs (bool, Optional): Include songs from the album in the returned dict. Default: ``True``. Returns: dict: Album information.
def on_create_view(self): d = self.declaration changed = not d.condition if changed: d.condition = True view = self.get_view() if changed: self.ready.set_result(True) return view
Trigger the click
def brute_permutation(A, B):
    rmsd_min = np.inf
    view_min = None
    num_atoms = A.shape[0]
    initial_order = list(range(num_atoms))
    for reorder_indices in generate_permutations(initial_order, num_atoms):
        coords_ordered = B[reorder_indices]
        rmsd_temp = kabsch_rmsd(A, coords_ordered)
        if rmsd_temp < rmsd_min:
            rmsd_min = rmsd_temp
            view_min = copy.deepcopy(reorder_indices)
    return view_min
Re-orders the input atom list and xyz coordinates using the brute force method of permuting all rows of the input coordinates Parameters ---------- A : array (N,D) matrix, where N is points and D is dimension B : array (N,D) matrix, where N is points and D is dimension Returns ------- view : array (N,1) matrix, reordered view of B projected to A
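A small usage sketch: B holds the same points as A with its rows shuffled, and the returned view re-aligns it (assumes numpy and the helpers referenced above are available).

import numpy as np

A = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])
B = A[[2, 0, 1]]                  # same points, rows shuffled
view = brute_permutation(A, B)
reordered = B[view]               # row-aligned with A again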
def markdown_2_rst(lines): out = [] code = False for line in lines: if line.strip() == "```": code = not code space = " " * (len(line.rstrip()) - 3) if code: out.append("\n\n%s.. code-block:: none\n\n" % space) else: out.append("\n") else: if code and line.strip(): line = " " + line else: line = line.replace("\\", "\\\\") out.append(line) return out
Convert markdown to restructured text
def chunk_upload_file(self, name, folder_id, file_path,
                      progress_callback=None, chunk_size=1024*1024*1):
    try:
        return self.__do_chunk_upload_file(name, folder_id, file_path,
                                           progress_callback, chunk_size)
    except BoxError as ex:
        if ex.status != 401:
            raise
        return self.__do_chunk_upload_file(name, folder_id, file_path,
                                           progress_callback, chunk_size)
Upload a file chunk by chunk. The whole file is never loaded in memory. Use this function for big file. The callback(transferred, total) to let you know the upload progress. Upload can be cancelled if the callback raise an Exception. >>> def progress_callback(transferred, total): ... print 'Uploaded %i bytes of %i' % (transferred, total, ) ... if user_request_cancel: ... raise MyCustomCancelException() Args: name (str): Name of the file on your Box storage. folder_id (int): ID of the folder where to upload the file. file_path (str): Local path of the file to upload. progress_callback (func): Function called each time a chunk is uploaded. chunk_size (int): Size of chunks. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
def remove_first_word(ctx, text): text = conversions.to_string(text, ctx).lstrip() first = first_word(ctx, text) return text[len(first):].lstrip() if first else ''
Removes the first word from the given text string
def from_totient(public_key, totient):
    p_plus_q = public_key.n - totient + 1
    p_minus_q = isqrt(p_plus_q * p_plus_q - public_key.n * 4)
    q = (p_plus_q - p_minus_q) // 2
    p = p_plus_q - q
    if not p*q == public_key.n:
        raise ValueError('given public key and totient do not match.')
    return PaillierPrivateKey(public_key, p, q)
given the totient, one can factorize the modulus The totient is defined as totient = (p - 1) * (q - 1), and the modulus is defined as modulus = p * q Args: public_key (PaillierPublicKey): The corresponding public key totient (int): the totient of the modulus Returns: the :class:`PaillierPrivateKey` that corresponds to the inputs Raises: ValueError: if the given totient is not the totient of the modulus of the given public key
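A toy illustration of the identity the function relies on (deliberately tiny numbers, not real key sizes).

p, q = 11, 13
n = p * q                      # modulus: 143
totient = (p - 1) * (q - 1)    # 120
p_plus_q = n - totient + 1     # 24 == p + q
p_minus_q = round((p_plus_q ** 2 - 4 * n) ** 0.5)   # 2 == |p - q|
q2 = (p_plus_q - p_minus_q) // 2                    # 11
p2 = p_plus_q - q2                                  # 13
assert {p2, q2} == {p, q}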
def triple_fraction(self,query='mass_A > 0', unc=False): subdf = self.stars.query(query) ntriples = ((subdf['mass_B'] > 0) & (subdf['mass_C'] > 0)).sum() frac = ntriples/len(subdf) if unc: return frac, frac/np.sqrt(ntriples) else: return frac
Triple fraction of stars following given query
def from_string(rxn_string):
    rct_str, prod_str = rxn_string.split("->")

    def get_comp_amt(comp_str):
        return {Composition(m.group(2)): float(m.group(1) or 1)
                for m in re.finditer(
                    r"([\d\.]*(?:[eE]-?[\d\.]+)?)\s*([A-Z][\w\.\(\)]*)",
                    comp_str)}

    return BalancedReaction(get_comp_amt(rct_str), get_comp_amt(prod_str))
Generates a balanced reaction from a string. The reaction must already be balanced. Args: rxn_string: The reaction string. For example, "4 Li + O2-> 2Li2O" Returns: BalancedReaction
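Usage sketch with the reaction string from the docstring, assuming the function is exposed as a static method on BalancedReaction.

rxn = BalancedReaction.from_string("4 Li + O2 -> 2 Li2O")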
def _Open(self): self._connection = sqlite3.connect( self._path, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) self._cursor = self._connection.cursor()
Opens the task storage for reading.
def _encode_timestamp(name, value, dummy0, dummy1): return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time)
Encode bson.timestamp.Timestamp.
def check_down_connections(self): now = time.time() for db_num, marked_down_at in self._down_connections.items(): if marked_down_at + self.retry_timeout <= now: self.mark_connection_up(db_num)
Iterates through all connections which were previously listed as unavailable and marks any that have expired their retry_timeout as being up.
async def on_raw_301(self, message): target, nickname, message = message.params info = { 'away': True, 'away_message': message } if nickname in self.users: self._sync_user(nickname, info) if nickname in self._pending['whois']: self._whois_info[nickname].update(info)
User is away.
def all(self): return self.pages(self.url.page, self.url.max_page)
Yield torrents in range from current page to last page
def cli(env): settings = config.get_settings_from_client(env.client) env.fout(config.config_table(settings))
Show current configuration.
def get_dataarg(args): for i, arg in enumerate(args): if is_nested_config_arg(arg): return i, arg elif is_std_config_arg(arg): return i, {"config": arg} elif isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]): return i, arg[0] raise ValueError("Did not find configuration or data object in arguments: %s" % args)
Retrieve the world 'data' argument from a set of input parameters.
def run_MDR(n,stack_float,labels=None): x1 = stack_float.pop() x2 = stack_float.pop() if len(np.unique(x1))<=3 and len(np.unique(x2))<=3: tmp = np.vstack((x1,x2)).transpose() if labels is None: return n.model.transform(tmp)[:,0] else: out = n.model.fit_transform(tmp,labels)[:,0] return out else: return np.zeros(x1.shape[0])
run utility function for MDR nodes.
def query(self, expression, vm='python'): condition = self.eval(expression, vm=vm) return self.compress(condition)
Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array
def _find_guids(guid_string):
    guids = []
    for found_guid in re.finditer(GUID_REGEX, guid_string):
        if found_guid.groups():
            guids.append(found_guid.group(0).strip('{}'))
    return sorted(list(set(guids)))
Return the set of GUIDs found in guid_string :param str guid_string: String containing zero or more GUIDs. Each GUID may or may not be enclosed in {} Example data (this string contains two distinct GUIDs): PARENT_SNAPSHOT_ID SNAPSHOT_ID {a5b8999f-5d95-4aff-82de-e515b0101b66} {a5b8999f-5d95-4aff-82de-e515b0101b66} *{a7345be5-ab66-478c-946e-a6c2caf14909}
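Applying the helper to the example data from the docstring; the repeated first GUID collapses to one entry and the braces are stripped.

data = ("PARENT_SNAPSHOT_ID                      SNAPSHOT_ID "
        "{a5b8999f-5d95-4aff-82de-e515b0101b66}  "
        "{a5b8999f-5d95-4aff-82de-e515b0101b66} "
        "*{a7345be5-ab66-478c-946e-a6c2caf14909}")
guids = _find_guids(data)
# expected: the two distinct GUIDs, brace-stripped and sorted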
def write_xml_document(self, document): self._out.write(ET.tostring(document)) self._out.flush()
Writes a string representation of an ``ElementTree`` object to the output stream. :param document: An ``ElementTree`` object.
def copy_reset(self): self._filters = [] self._filter_or = False self._paginate = True self._paginate_count = 0 self._result_count = None self._result_limit = 500 self._result_start = 0
Reset values after instance has been copied
def _or_join(self, close_group=False): if not self.initialized: raise ValueError("You must add a search term before adding an operator.") else: self._operator("OR", close_group=close_group) return self
Combine terms with OR. There must be a term added before using this method. Arguments: close_group (bool): If ``True``, will end the current group and start a new one. If ``False``, will continue current group. Example: If the current query is "(term1" .or(close_group=True) => "(term1) OR(" .or(close_group=False) => "(term1 OR " Returns: SearchHelper: Self
def add_hits_to_proteins(self, hmm_hit_list):
    for org in self.organisms:
        print "adding SearchIO hit objects for", org.accession
        for hit in hmm_hit_list:
            hit_org_id = hit.id.split(',')[0]
            hit_prot_id = hit.id.split(',')[1]
            if org.accession == hit_org_id:
                for prot in org.proteins:
                    if prot.accession == hit_prot_id:
                        prot.hmm_hit_list.append(hit)
Add HMMER results to Protein objects
def md5_digest(instr): return salt.utils.stringutils.to_unicode( hashlib.md5(salt.utils.stringutils.to_bytes(instr)).hexdigest() )
Generate an md5 hash of a given string.
def add_events(self, **kwargs): event_q = kwargs.get('event_queue') pri = kwargs.get('priority') if not event_q or not pri: return try: event_type = 'server.failure.recovery' payload = {} timestamp = time.ctime() data = (event_type, payload) event_q.put((pri, timestamp, data)) LOG.debug('Added failure recovery event to the queue.') except Exception as exc: LOG.exception('Error: %(exc)s for event %(event)s', {'exc': str(exc), 'event': event_type}) raise exc
Add failure event into the queue.
def output(data, **kwargs): if isinstance(data, Exception): data = six.text_type(data) if 'output_indent' in __opts__ and __opts__['output_indent'] >= 0: return pprint.pformat(data, indent=__opts__['output_indent']) return pprint.pformat(data)
Print out via pretty print
def _write_module_descriptor_file(handle, module_dir): readme = _module_descriptor_file(module_dir) readme_content = ( "Module: %s\nDownload Time: %s\nDownloader Hostname: %s (PID:%d)" % (handle, str(datetime.datetime.today()), socket.gethostname(), os.getpid())) tf_utils.atomic_write_string_to_file(readme, readme_content, overwrite=True)
Writes a descriptor file about the directory containing a module. Args: handle: Module name/handle. module_dir: Directory where a module was downloaded.
def iter_children(self, key=None):
    tag = None
    if key:
        tag = self._get_aliases().get(key)
        if not tag:
            raise KeyError(key)
    for child in self._xml.iterchildren(tag=tag):
        if len(child):
            yield self.__class__(child)
        else:
            yield Literal(child)
u"""Iterates over children. :param key: A key for filtering children by tagname.
def quote_str(obj):
    if not isinstance(obj, str):
        return obj
    return ("'{obj}'".format(obj=obj) if '"' in obj
            else '"{obj}"'.format(obj=obj))
r""" Add extra quotes to a string. If the argument is not a string it is returned unmodified. :param obj: Object :type obj: any :rtype: Same as argument For example: >>> import pmisc >>> pmisc.quote_str(5) 5 >>> pmisc.quote_str('Hello!') '"Hello!"' >>> pmisc.quote_str('He said "hello!"') '\'He said "hello!"\''
def cleanup(self): self.lock.acquire() logger.debug('Acquired lock in cleanup for ' + str(self)) self.children = [child for child in self.children if child.is_alive()] self.lock.release()
Clean up finished children. :return: None
def _determine_keys(dictionary):
    optional = {}
    defaults = {}
    mandatory = {}
    types = {}
    for key, value in dictionary.items():
        if isinstance(key, Optional):
            optional[key.value] = parse_schema(value)
            if isinstance(value, BaseSchema) and \
                    value.default is not UNSPECIFIED:
                defaults[key.value] = (value.default, value.null_values)
            continue
        if type(key) is type:
            types[key] = parse_schema(value)
            continue
        mandatory[key] = parse_schema(value)
    return mandatory, optional, types, defaults
Determine the different kinds of keys.
def _add_sentence(self, sentence, ignore_traces=True): self.sentences.append(self._node_id) self.add_edge(self.root, self._node_id, edge_type=dg.EdgeTypes.dominance_relation) self._parse_sentencetree(sentence, ignore_traces=ignore_traces) self._node_id += 1
add a sentence from the input document to the document graph. Parameters ---------- sentence : nltk.tree.Tree a sentence represented by a Tree instance
def imagenet_preprocess_example(example, mode, resize_size=None, normalize=True): resize_size = resize_size or [299, 299] assert resize_size[0] == resize_size[1] image = example["inputs"] if mode == tf.estimator.ModeKeys.TRAIN: image = preprocess_for_train(image, image_size=resize_size[0], normalize=normalize) else: image = preprocess_for_eval(image, image_size=resize_size[0], normalize=normalize) example["inputs"] = image return example
Preprocessing used for Imagenet and similar problems.
def delete_dimension(dimension_id,**kwargs): try: dimension = db.DBSession.query(Dimension).filter(Dimension.id==dimension_id).one() db.DBSession.query(Unit).filter(Unit.dimension_id==dimension.id).delete() db.DBSession.delete(dimension) db.DBSession.flush() return True except NoResultFound: raise ResourceNotFoundError("Dimension (dimension_id=%s) does not exist"%(dimension_id))
Delete a dimension from the DB. Raises an exception if the dimension does not exist.
def copy_recurse(lib_path, copy_filt_func=None, copied_libs=None):
    if copied_libs is None:
        copied_libs = {}
    else:
        copied_libs = dict(copied_libs)
    done = False
    while not done:
        in_len = len(copied_libs)
        _copy_required(lib_path, copy_filt_func, copied_libs)
        done = len(copied_libs) == in_len
    return copied_libs
Analyze `lib_path` for library dependencies and copy libraries `lib_path` is a directory containing libraries. The libraries might themselves have dependencies. This function analyzes the dependencies and copies library dependencies that match the filter `copy_filt_func`. It also adjusts the depending libraries to use the copy. It keeps iterating over `lib_path` until all matching dependencies (of dependencies of dependencies ...) have been copied. Parameters ---------- lib_path : str Directory containing libraries copy_filt_func : None or callable, optional If None, copy any library that found libraries depend on. If callable, called on each depended library name; copy where ``copy_filt_func(libname)`` is True, don't copy otherwise copied_libs : dict Dict with (key, value) pairs of (``copied_lib_path``, ``dependings_dict``) where ``copied_lib_path`` is the canonical path of a library that has been copied to `lib_path`, and ``dependings_dict`` is a dictionary with (key, value) pairs of (``depending_lib_path``, ``install_name``). ``depending_lib_path`` is the canonical path of the library depending on ``copied_lib_path``, ``install_name`` is the name that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in its install names). Returns ------- copied_libs : dict Input `copied_libs` dict with any extra libraries and / or dependencies added.
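A hedged usage sketch: the bundle path is hypothetical, and the filter skips system libraries so only third-party dependencies get copied in.

def copy_filt(libname):
    # skip macOS system libraries; copy everything else
    return not libname.startswith(('/usr/lib', '/System'))

copied = copy_recurse('MyApp.app/Contents/libs', copy_filt_func=copy_filt)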
def _set_lim_and_transforms(self): self.transAxes = BboxTransformTo(self.bbox) self.transData = self.GingaTransform() self.transData.viewer = self.viewer self._xaxis_transform = self.transData self._yaxis_transform = self.transData
This is called once when the plot is created to set up all the transforms for the data, text and grids.
def setTransducer(self, edfsignal, transducer): if (edfsignal < 0 or edfsignal > self.n_channels): raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['transducer'] = transducer self.update_header()
Sets the transducer of signal edfsignal :param edfsignal: int :param transducer: str Notes ----- This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
def sync_mounts(self, active_mounts, resources, vault_client): mounts = [x for x in resources if isinstance(x, (Mount, AWS))] s_resources = sorted(mounts, key=absent_sort) for resource in s_resources: active_mounts = self.actually_mount(vault_client, resource, active_mounts) for resource in [x for x in resources if isinstance(x, Secret)]: n_mounts = self.actually_mount(vault_client, resource, active_mounts) if len(n_mounts) != len(active_mounts): LOG.warning("Ad-Hoc mount with %s. Please specify" " explicit mountpoints.", resource) active_mounts = n_mounts return active_mounts, [x for x in resources if not isinstance(x, (Mount))]
Synchronizes mount points. Removes things before adding new.
def simple_md2html(text, urls): retval = special_links_replace(text, urls) retval = re.sub(r'\n\n', r'</p><p>', retval) retval = re.sub(r'\n', r'<br />\n', retval) retval = re.sub(r'"', r'&quot;', retval) retval = list2html(retval) return link2html(retval)
Convert a text from md to html
def result(self, input_sequence: str, config: Optional[BasicConfig] = None) -> Any: sequential_labelers = [ sl_cls(input_sequence, config) for sl_cls in self.sequential_labeler_classes ] index_labels_generator = ((index, { type(labeler): labeler.label(index) for labeler in sequential_labelers }) for index in range(len(input_sequence))) label_processor = self.label_processor_class(input_sequence, index_labels_generator, config) label_processor_result = label_processor.result() output_generator = self.output_generator_class(input_sequence, label_processor_result, config) return output_generator.result()
Execute the workflow. :param input_sequence: The input sequence.
def _FindPartition(self, key): hash_value = self.hash_generator.ComputeHash(key) return self._LowerBoundSearch(self.partitions, hash_value)
Finds the partition from the byte array representation of the partition key.
async def home_z(self, mount: top_types.Mount = None): if not mount: axes = [Axis.Z, Axis.A] else: axes = [Axis.by_mount(mount)] await self.home(axes)
Home the two z-axes
def sky2pix_ellipse(self, pos, a, b, pa):
    ra, dec = pos
    x, y = self.sky2pix(pos)

    x_off, y_off = self.sky2pix(translate(ra, dec, a, pa))
    sx = np.hypot((x - x_off), (y - y_off))
    theta = np.arctan2((y_off - y), (x_off - x))

    x_off, y_off = self.sky2pix(translate(ra, dec, b, pa - 90))
    sy = np.hypot((x - x_off), (y - y_off))
    theta2 = np.arctan2((y_off - y), (x_off - x)) - np.pi / 2

    defect = theta - theta2
    sy *= abs(np.cos(defect))

    return x, y, sx, sy, np.degrees(theta)
Convert an ellipse from sky to pixel coordinates. Parameters ---------- pos : (float, float) The (ra, dec) of the ellipse center (degrees). a, b, pa: float The semi-major axis, semi-minor axis and position angle of the ellipse (degrees). Returns ------- x,y : float The (x, y) pixel coordinates of the ellipse center. sx, sy : float The major and minor axes (FWHM) in pixels. theta : float The rotation angle of the ellipse (degrees). theta = 0 corresponds to the ellipse being aligned with the x-axis.
def withTracebackPrint(ErrorType, thrownError, _traceback): file = StringIO.StringIO() traceback.print_exception(ErrorType, thrownError, _traceback, file = file) return _loadError(ErrorType, thrownError, file.getvalue())
returns an Exception object for the given ErrorType of the thrownError and the _traceback can be used like withTracebackPrint(*sys.exc_info())
def layout_route(self, request):
    body = self.layout_impl()
    return http_util.Respond(request, body, 'application/json')
r"""Fetches the custom layout specified by the config file in the logdir. If more than 1 run contains a layout, this method merges the layouts by merging charts within individual categories. If 2 categories with the same name are found, the charts within are merged. The merging is based on the order of the runs to which the layouts are written. The response is a JSON object mirroring properties of the Layout proto if a layout for any run is found. The response is an empty object if no layout could be found.
def __init_vertical_plot(self): if len(self.ax2.lines) > 0: self.ax2.cla() self.ax2.set_ylabel(self.datalabel, fontsize=self.fontsize) self.ax2.set_xlabel(self.spectrumlabel, fontsize=self.fontsize) self.ax2.set_title('vertical point profiles', fontsize=self.fontsize) self.ax2.set_xlim([1, self.bands]) self.vline = self.ax2.axvline(self.slider.value, color='black')
Set up the vertical profile plot.
def create_command_history_subscription(self, on_data=None, timeout=60): return self._client.create_command_history_subscription( issued_command=self, on_data=on_data, timeout=timeout)
Create a new command history subscription for this command. :param on_data: Function that gets called with :class:`.CommandHistory` updates. :param float timeout: The amount of seconds to wait for the request to complete. :return: Future that can be used to manage the background websocket subscription :rtype: .CommandHistorySubscription
def _max_gain_split(self, examples): gains = self._new_set_of_gain_counters() for example in examples: for gain in gains: gain.add(example) winner = max(gains, key=lambda gain: gain.get_gain()) if not winner.get_target_class_counts(): raise ValueError("Dataset is empty") return winner
Returns an OnlineInformationGain of the attribute with max gain based on `examples`.
def info(self, collector_id): cid = self.collector_id if collector_id: cid = collector_id url = '{0}/{1}'.format(self.url, cid) request = requests.get(url, auth=self.auth) return request.json()
Return a dict of collector. Args: collector_id (int): id of collector (optional)
def new_binary_container(self, name): self._message_stack.append(BinaryContainerTemplate(name, self._current_container))
Defines a new binary container to template. Binary container can only contain binary fields defined with `Bin` keyword. Examples: | New binary container | flags | | bin | 2 | foo | | bin | 6 | bar | | End binary container |
def fill_datetime(self): if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._fill_datetime
Returns when the slot was filled. Returns: A datetime.datetime. Raises: SlotNotFilledError if the value hasn't been filled yet.
def select_atoms(indices): rep = current_representation() rep.select({'atoms': Selection(indices, current_system().n_atoms)}) return rep.selection_state
Select atoms by their indices. You can select the first 3 atoms as follows:: select_atoms([0, 1, 2]) Return the current selection dictionary.
def expand_node_successors(universe, graph, node: BaseEntity) -> None: skip_predecessors = set() for predecessor in universe.predecessors(node): if predecessor in graph: skip_predecessors.add(predecessor) continue graph.add_node(predecessor, **universe.nodes[predecessor]) graph.add_edges_from( (predecessor, target, key, data) for predecessor, target, key, data in universe.in_edges(node, data=True, keys=True) if predecessor not in skip_predecessors ) update_node_helper(universe, graph) update_metadata(universe, graph)
Expand around the successors of the given node in the result graph. :param pybel.BELGraph universe: The graph containing the stuff to add :param pybel.BELGraph graph: The graph to add stuff to :param node: A BEL node
def make_secret(form_instance, secret_fields=None): warn_untested() if secret_fields is None: secret_fields = ['business', 'item_name'] data = "" for name in secret_fields: if hasattr(form_instance, 'cleaned_data'): if name in form_instance.cleaned_data: data += unicode(form_instance.cleaned_data[name]) else: if name in form_instance.initial: data += unicode(form_instance.initial[name]) elif name in form_instance.fields and form_instance.fields[name].initial is not None: data += unicode(form_instance.fields[name].initial) secret = get_sha1_hexdigest(settings.SECRET_KEY, data) return secret
Returns a secret for use in a EWP form or an IPN verification based on a selection of variables in params. Should only be used with SSL.
def get_full_python_version(): version_part = '.'.join(str(x) for x in sys.version_info) int_width = struct.calcsize('P') * 8 int_width_part = str(int_width) + 'bit' return version_part + '.' + int_width_part
Get full Python version. E.g. - `2.7.11.final.0.32bit` - `3.5.1.final.0.64bit` :return: Full Python version.
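The two pieces the function concatenates can be inspected directly; on a 64-bit CPython 3.8 build, for example, they would look roughly like this.

import struct
import sys

version_part = '.'.join(str(x) for x in sys.version_info)   # e.g. '3.8.10.final.0'
int_width_part = str(struct.calcsize('P') * 8) + 'bit'      # '64bit' on a 64-bit build
print(version_part + '.' + int_width_part)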
def detach_usage_plan_from_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None): return _update_usage_plan_apis(plan_id, apis, 'remove', region=region, key=key, keyid=keyid, profile=profile)
Detaches the given usage plan from each of the APIs provided in a list of apiId and stage values. .. versionadded:: 2017.7.0 apis a list of dictionaries, where each dictionary contains the following: apiId a string, which is the id of the created API in AWS ApiGateway stage a string, which is the stage that the created API is deployed to. CLI Example: .. code-block:: bash salt myminion boto_apigateway.detach_usage_plan_from_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
def save_all(polyfile):
    nump = len(PolygonFilter.instances)
    if nump == 0:
        raise PolygonFilterError("There are no polygon filters to save.")
    for p in PolygonFilter.instances:
        polyobj = p.save(polyfile, ret_fobj=True)
        polyobj.close()
Save all polygon filters
async def send_script(self, conn_id, data): self._ensure_connection(conn_id, True) connection_string = self._get_property(conn_id, "connection_string") msg = dict(connection_string=connection_string, fragment_count=1, fragment_index=0, script=base64.b64encode(data)) await self._send_command(OPERATIONS.SEND_SCRIPT, msg, COMMANDS.SendScriptResponse)
Send a script to this IOTile device. Args: conn_id (int): A unique identifier that will refer to this connection data (bytes): the script to send to the device
def validate(self, ticket, client_ip=None, now=None, encoding='utf-8'):
    parts = self.parse(ticket)
    new_ticket = self.new(*(parts[1:]), client_ip=client_ip,
                          encoding=encoding)
    if new_ticket[:self._hash.digest_size * 2] != parts.digest:
        raise TicketDigestError(ticket)
    if now is None:
        now = time.time()
    if parts.valid_until <= now:
        raise TicketExpired(ticket)
    return parts
Validates the passed ticket; raises a TicketError on failure. Args: ticket: String value (possibly generated by new function) client_ip: Optional IPAddress of client, should be passed if the ip address was passed on ticket creation. now: Optional (defaults to time.time()) time to use when validating ticket date Returns: Ticket a TicketInfo tuple containing the users authentication details on success Raises: TicketParseError: Invalid ticket format TicketDigestError: Digest is incorrect (ticket data was modified) TicketExpired: Ticket has passed expiration date
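A hedged usage sketch; `factory` stands in for whatever object these ticket methods live on, and the ticket string and IP are example values.

try:
    info = factory.validate(ticket, client_ip='203.0.113.7')
except TicketExpired:
    pass  # e.g. redirect the user to log in again
except TicketError:
    pass  # malformed or tampered ticket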
def cp(src_filename, dst_filename):
    src_dev, src_dev_filename = get_dev_and_path(src_filename)
    dst_dev, dst_dev_filename = get_dev_and_path(dst_filename)
    if src_dev is dst_dev:
        return auto(copy_file, src_filename, dst_dev_filename)
    filesize = auto(get_filesize, src_filename)
    if dst_dev is None:
        with open(dst_dev_filename, 'wb') as dst_file:
            return src_dev.remote(send_file_to_host, src_dev_filename,
                                  dst_file, filesize,
                                  xfer_func=recv_file_from_remote)
    if src_dev is None:
        with open(src_dev_filename, 'rb') as src_file:
            return dst_dev.remote(recv_file_from_host, src_file,
                                  dst_dev_filename, filesize,
                                  xfer_func=send_file_to_remote)
    host_temp_file = tempfile.TemporaryFile()
    if src_dev.remote(send_file_to_host, src_dev_filename, host_temp_file,
                      filesize, xfer_func=recv_file_from_remote):
        host_temp_file.seek(0)
        return dst_dev.remote(recv_file_from_host, host_temp_file,
                              dst_dev_filename, filesize,
                              xfer_func=send_file_to_remote)
    return False
Copies one file to another. The source file may be local or remote and the destination file may be local or remote.
def reset(self, indices, observations):
    assert isinstance(indices, np.ndarray)
    assert len(indices.shape) == 1
    assert isinstance(observations, np.ndarray)
    assert indices.shape[0] == observations.shape[0]
    for index, observation in zip(indices, observations):
        trajectory = self._trajectories[index]
        if not trajectory.is_active:
            trajectory.add_time_step(observation=observation)
            continue
        self._complete_trajectory(trajectory, index)
        self._trajectories[index].add_time_step(observation=observation)
Resets trajectories at given indices and populates observations. Reset can either be called right at the beginning, when there are no time-steps, or to reset a currently active trajectory. If resetting a currently active trajectory then we save it in self._completed_trajectories. Args: indices: 1-D np.ndarray stating the indices to reset. observations: np.ndarray of shape (indices len, obs.shape) of observations
def write_implied_format(self, path, jpeg_quality=0, jpeg_progressive=0): filename = fspath(path) with _LeptonicaErrorTrap(): lept.pixWriteImpliedFormat( os.fsencode(filename), self._cdata, jpeg_quality, jpeg_progressive )
Write pix to the filename, with the extension indicating format. jpeg_quality -- quality (iff JPEG; 1 - 100, 0 for default) jpeg_progressive -- (iff JPEG; 0 for baseline seq., 1 for progressive)
def get_all_load_balancers(self, load_balancer_names=None):
    params = {}
    if load_balancer_names:
        self.build_list_params(params, load_balancer_names,
                               'LoadBalancerNames.member.%d')
    return self.get_list('DescribeLoadBalancers', params,
                         [('member', LoadBalancer)])
Retrieve all load balancers associated with your account. :type load_balancer_names: list :keyword load_balancer_names: An optional list of load balancer names. :rtype: :py:class:`boto.resultset.ResultSet` :return: A ResultSet containing instances of :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
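Hedged usage sketch, assuming a boto 2 style ELB connection; the load balancer name is an example value.

import boto

conn = boto.connect_elb()
for lb in conn.get_all_load_balancers(load_balancer_names=['web-lb']):
    print(lb.name)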
def query_nodes(self,
                bel: Optional[str] = None,
                type: Optional[str] = None,
                namespace: Optional[str] = None,
                name: Optional[str] = None,
                ) -> List[Node]:
    q = self.session.query(Node)
    if bel:
        q = q.filter(Node.bel.contains(bel))
    if type:
        q = q.filter(Node.type == type)
    if namespace or name:
        q = q.join(NamespaceEntry)
        if namespace:
            q = q.join(Namespace).filter(Namespace.keyword.contains(namespace))
        if name:
            q = q.filter(NamespaceEntry.name.contains(name))
    return q
Query nodes in the database. :param bel: BEL term that describes the biological entity. e.g. ``p(HGNC:APP)`` :param type: Type of the biological entity. e.g. Protein :param namespace: Namespace keyword that is used in BEL. e.g. HGNC :param name: Name of the biological entity. e.g. APP
def refresh(self, **kwargs): if self._id_attr: path = '%s/%s' % (self.manager.path, self.id) else: path = self.manager.path server_data = self.manager.gitlab.http_get(path, **kwargs) self._update_attrs(server_data)
Refresh a single object from server. Args: **kwargs: Extra options to send to the server (e.g. sudo) Returns None (updates the object) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the server cannot perform the request
def _request_eip(interface, vm_): params = {'Action': 'AllocateAddress'} params['Domain'] = interface.setdefault('domain', 'vpc') eips = aws.query(params, return_root=True, location=get_location(vm_), provider=get_provider(), opts=__opts__, sigver='4') for eip in eips: if 'allocationId' in eip: return eip['allocationId'] return None
Request and return Elastic IP
def put(value):
    worker = global_worker
    worker.check_connected()
    with profiling.profile("ray.put"):
        if worker.mode == LOCAL_MODE:
            return value
        object_id = ray._raylet.compute_put_id(
            worker.current_task_id,
            worker.task_context.put_index,
        )
        worker.put_object(object_id, value)
        worker.task_context.put_index += 1
        return object_id
Store an object in the object store. Args: value: The Python object to be stored. Returns: The object ID assigned to this value.
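Typical round-trip sketch: the ID returned by put can be handed to tasks or resolved with ray.get.

import ray

ray.init()
obj_id = ray.put([1, 2, 3])
assert ray.get(obj_id) == [1, 2, 3]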
def _get_prediction_device(self) -> int: devices = {util.get_device_of(param) for param in self.parameters()} if len(devices) > 1: devices_string = ", ".join(str(x) for x in devices) raise ConfigurationError(f"Parameters have mismatching cuda_devices: {devices_string}") elif len(devices) == 1: return devices.pop() else: return -1
This method checks the device of the model parameters to determine the cuda_device this model should be run on for predictions. If there are no parameters, it returns -1. Returns ------- The cuda device this model should run on for predictions.
def store_data(self, data, encoding='utf-8'): path = random_filename(self.work_path) try: with open(path, 'wb') as fh: if isinstance(data, str): data = data.encode(encoding) if data is not None: fh.write(data) return self.store_file(path) finally: try: os.unlink(path) except OSError: pass
Put the given content into a file, possibly encoding it as UTF-8 in the process.
def visit_listcomp(self, node): return "[%s %s]" % ( node.elt.accept(self), " ".join(n.accept(self) for n in node.generators), )
return an astroid.ListComp node as string
def _ensure_reactor_running():
    if not reactor.running:
        signal_registrations = []

        def signal_capture(*args, **kwargs):
            signal_registrations.append((orig_signal, args, kwargs))

        def set_wakeup_fd_capture(*args, **kwargs):
            signal_registrations.append((orig_set_wakeup_fd, args, kwargs))

        orig_signal = signal.signal
        signal.signal = signal_capture
        orig_set_wakeup_fd = signal.set_wakeup_fd
        signal.set_wakeup_fd = set_wakeup_fd_capture

        reactor_thread = threading.Thread(target=reactor.run, name="reactor")
        reactor_thread.daemon = True
        reactor_thread.start()

        while not reactor.running:
            time.sleep(0.01)
        time.sleep(0.01)

        signal.signal = orig_signal
        signal.set_wakeup_fd = orig_set_wakeup_fd
        for func, args, kwargs in signal_registrations:
            func(*args, **kwargs)
Starts the twisted reactor if it is not running already. The reactor is started in a new daemon-thread. Has to perform dirty hacks so that twisted can register signals even if it is not running in the main-thread.
def _sendResult(self, result): try: result = json.dumps(result) except Exception as error: result = json.dumps(self._errorInfo(command, error)) sys.stdout.write(result) sys.stdout.write("\n") sys.stdout.flush()
Send parseable json result of command.