def infer(self, inputPattern, computeScores=True, overCategories=True,
          partitionId=None):
    sparsity = 0.0
    if self.minSparsity > 0.0:
        sparsity = float(len(inputPattern.nonzero()[0])) / len(inputPattern)

    if len(self._categoryList) == 0 or sparsity < self.minSparsity:
        winner = None
        inferenceResult = numpy.zeros(1)
        dist = numpy.ones(1)
        categoryDist = numpy.ones(1)
    else:
        maxCategoryIdx = max(self._categoryList)
        inferenceResult = numpy.zeros(maxCategoryIdx + 1)
        dist = self._getDistances(inputPattern, partitionId=partitionId)
        validVectorCount = len(self._categoryList) - self._categoryList.count(-1)

        if self.exact:
            exactMatches = numpy.where(dist < 0.00001)[0]
            if len(exactMatches) > 0:
                for i in exactMatches[:min(self.k, validVectorCount)]:
                    inferenceResult[self._categoryList[i]] += 1.0
        else:
            sorted = dist.argsort()
            for j in sorted[:min(self.k, validVectorCount)]:
                inferenceResult[self._categoryList[j]] += 1.0

        if inferenceResult.any():
            winner = inferenceResult.argmax()
            inferenceResult /= inferenceResult.sum()
        else:
            winner = None
        categoryDist = min_score_per_category(maxCategoryIdx,
                                              self._categoryList, dist)
        categoryDist.clip(0, 1.0, categoryDist)

    if self.verbosity >= 1:
        print "%s infer:" % (g_debugPrefix)
        print "  active inputs:", _labeledInput(inputPattern,
                                                cellsPerCol=self.cellsPerCol)
        print "  winner category:", winner
        print "  pct neighbors of each category:", inferenceResult
        print "  dist of each prototype:", dist
        print "  dist of each category:", categoryDist

    result = (winner, inferenceResult, dist, categoryDist)
    return result
Finds the category that best matches the input pattern. Returns the winning
category index as well as a distribution over all categories.

:param inputPattern: (list or array) The pattern to be classified. This must
    be a dense representation of the array (e.g. [0, 0, 1, 1, 0, 1]).
:param computeScores: NO EFFECT
:param overCategories: NO EFFECT
:param partitionId: (int) If provided, all training vectors with partitionId
    equal to that of the input pattern are ignored. For example, this may be
    used to perform k-fold cross validation without repopulating the
    classifier. First partition all the data into k equal partitions numbered
    0, 1, 2, ... and then call learn() for each vector passing in its
    partitionId. Then, during inference, by passing in the partition ID in
    the call to infer(), all other vectors with the same partitionId are
    ignored, simulating the effect of repopulating the classifier while
    omitting the training vectors in the same partition.

:returns: 4-tuple with these fields:

    - ``winner``: The category with the greatest number of nearest neighbors
      within the kth nearest neighbors. If the inferenceResult contains no
      neighbors, the value of winner is None. This can happen, for example,
      in cases of exact matching, if there are no stored vectors, or if
      minSparsity is not met.
    - ``inferenceResult``: A list of length numCategories, each entry
      contains the number of neighbors within the top k neighbors that are
      in that category.
    - ``dist``: A list of length numPrototypes. Each entry is the distance
      from the unknown to that prototype. All distances are between 0.0 and
      1.0.
    - ``categoryDist``: A list of length numCategories. Each entry is the
      distance from the unknown to the nearest prototype of that category.
      All distances are between 0 and 1.0.
def getheaders(self, name):
    result = []
    current = ''
    have_header = 0
    for s in self.getallmatchingheaders(name):
        if s[0].isspace():
            if current:
                current = "%s\n %s" % (current, s.strip())
            else:
                current = s.strip()
        else:
            if have_header:
                result.append(current)
            current = s[s.find(":") + 1:].strip()
            have_header = 1
    if have_header:
        result.append(current)
    return result
Get all values for a header. This returns a list of values for headers given more than once; each value in the result list is stripped in the same way as the result of getheader(). If the header is not given, return an empty list.
def find_intersections_with(self, other):
    import shapely.geometry

    geom = _convert_var_to_shapely_geometry(other)

    result = []
    for p_start, p_end in zip(self.coords[:-1], self.coords[1:]):
        ls = shapely.geometry.LineString([p_start, p_end])
        intersections = ls.intersection(geom)
        intersections = list(_flatten_shapely_collection(intersections))

        intersections_points = []
        for inter in intersections:
            if isinstance(inter, shapely.geometry.linestring.LineString):
                inter_start = (inter.coords[0][0], inter.coords[0][1])
                inter_end = (inter.coords[-1][0], inter.coords[-1][1])
                intersections_points.extend([inter_start, inter_end])
            else:
                assert isinstance(inter, shapely.geometry.point.Point), (
                    "Expected to find shapely.geometry.point.Point or "
                    "shapely.geometry.linestring.LineString intersection, "
                    "actually found %s." % (type(inter),))
                intersections_points.append((inter.x, inter.y))

        inter_sorted = sorted(
            intersections_points,
            key=lambda p: np.linalg.norm(np.float32(p) - p_start)
        )

        result.append(inter_sorted)
    return result
Find all intersection points between the line string and `other`.

Parameters
----------
other : tuple of number or list of tuple of number or \
        list of LineString or LineString
    The other geometry to use during intersection tests.

Returns
-------
list of list of tuple of number
    All intersection points. One list per pair of consecutive start and end
    points, i.e. `N-1` lists for a line string of `N` points. Each list may
    be empty or may contain multiple points.
def db_value(self, value):
    value = self.transform_value(value)
    return self.hhash.encrypt(value,
                              salt_size=self.salt_size, rounds=self.rounds)
Convert the python value for storage in the database.
def edit_distance(s1, s2):
    d = {}
    lenstr1 = len(s1)
    lenstr2 = len(s2)
    for i in xrange(-1, lenstr1 + 1):
        d[(i, -1)] = i + 1
    for j in xrange(-1, lenstr2 + 1):
        d[(-1, j)] = j + 1

    for i in xrange(lenstr1):
        for j in xrange(lenstr2):
            if s1[i] == s2[j]:
                cost = 0
            else:
                cost = 1
            d[(i, j)] = min(
                d[(i - 1, j)] + 1,         # deletion
                d[(i, j - 1)] + 1,         # insertion
                d[(i - 1, j - 1)] + cost,  # substitution
            )
            if i and j and s1[i] == s2[j - 1] and s1[i - 1] == s2[j]:
                # transposition of adjacent characters
                d[(i, j)] = min(d[(i, j)], d[i - 2, j - 2] + cost)

    return d[lenstr1 - 1, lenstr2 - 1]
Calculates string edit distance between string 1 and string 2. Deletion, insertion, substitution, and transposition all increase edit distance.
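A quick usage sketch with hypothetical calls (the snippet is Python 2, since it relies on xrange). Each operation, including an adjacent transposition, costs one edit:

assert edit_distance("kitten", "sitting") == 3  # two substitutions + one insertion
assert edit_distance("abcd", "acbd") == 1       # one adjacent transposition
assert edit_distance("", "abc") == 3            # three insertions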
def debug(*args, **kwargs):
    if not (DEBUG and args):
        return None

    parent = kwargs.get('parent', None)
    with suppress(KeyError):
        kwargs.pop('parent')

    backlevel = kwargs.get('back', 1)
    with suppress(KeyError):
        kwargs.pop('back')

    frame = inspect.currentframe()
    while backlevel > 0:
        frame = frame.f_back
        backlevel -= 1

    fname = os.path.split(frame.f_code.co_filename)[-1]
    lineno = frame.f_lineno
    if parent:
        func = '{}.{}'.format(parent.__class__.__name__, frame.f_code.co_name)
    else:
        func = frame.f_code.co_name

    lineinfo = '{}:{} {}: '.format(
        C(fname, 'yellow'),
        C(str(lineno).ljust(4), 'blue'),
        C().join(C(func, 'magenta'), '()').ljust(20)
    )

    pargs = list(C(a, 'green').str() for a in args)
    pargs[0] = ''.join((lineinfo, pargs[0]))
    print_err(*pargs, **kwargs)
Print a message only if DEBUG is truthy.
def _delete_entity(self):
    if self._is_ndb():
        _NDB_KEY(self._model, self._key_name).delete()
    else:
        entity_key = db.Key.from_path(self._model.kind(), self._key_name)
        db.delete(entity_key)
Delete entity from datastore. Attempts to delete using the key_name stored on the object, whether or not the given key is in the datastore.
def submit_registration_form(self, form):
    self.lock.acquire()
    try:
        if form and form.type != "cancel":
            self.registration_form = form
            iq = Iq(stanza_type="set")
            iq.set_content(self.__register.submit_form(form))
            self.set_response_handlers(iq, self.registration_success,
                                       self.registration_error)
            self.send(iq)
        else:
            self.__register = None
    finally:
        self.lock.release()
Submit a registration form.

[client only]

:Parameters:
    - `form`: the filled-in form. When form is `None` or its type is
      "cancel" the registration is to be canceled.
:Types:
    - `form`: `pyxmpp.jabber.dataforms.Form`
def _initiate_starttls(self, **kwargs):
    if self._tls_state == "connected":
        raise RuntimeError("Already TLS-connected")
    kwargs["do_handshake_on_connect"] = False
    logger.debug("Wrapping the socket into ssl")
    self._socket = ssl.wrap_socket(self._socket, **kwargs)
    self._set_state("tls-handshake")
    self._continue_tls_handshake()
Initiate starttls handshake over the socket.
def writeToCheckpoint(self, checkpointDir):
    proto = self.getSchema().new_message()
    self.write(proto)

    checkpointPath = self._getModelCheckpointFilePath(checkpointDir)

    if os.path.exists(checkpointDir):
        if not os.path.isdir(checkpointDir):
            raise Exception(("Existing filesystem entry <%s> is not a model"
                             " checkpoint -- refusing to delete"
                             " (not a directory)") % checkpointDir)
        if not os.path.isfile(checkpointPath):
            raise Exception(("Existing filesystem entry <%s> is not a model"
                             " checkpoint -- refusing to delete"
                             " (%s missing or not a file)") %
                            (checkpointDir, checkpointPath))
        shutil.rmtree(checkpointDir)

    self.__makeDirectoryFromAbsolutePath(checkpointDir)

    with open(checkpointPath, 'wb') as f:
        proto.write(f)
Serializes model using capnproto and writes data to ``checkpointDir``
def omega_mixture(omegas, zs, CASRNs=None, Method=None,
                  AvailableMethods=False):
    def list_methods():
        methods = []
        if none_and_length_check([zs, omegas]):
            methods.append('SIMPLE')
        methods.append('NONE')
        return methods

    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]

    if Method == 'SIMPLE':
        _omega = mixing_simple(zs, omegas)
    elif Method == 'NONE':
        _omega = None
    else:
        raise Exception('Failure in function')
    return _omega
This function handles the calculation of a mixture's acentric factor.
Calculation is based on the omegas provided for each pure component. Will
automatically select a method to use if no Method is provided; returns None
if insufficient data is available.

Examples
--------
>>> omega_mixture([0.025, 0.12], [0.3, 0.7])
0.0915

Parameters
----------
omegas : array-like
    acentric factors of each component, [-]
zs : array-like
    mole fractions of each component, [-]
CASRNs : list of strings
    CASRNs, not currently used [-]

Returns
-------
omega : float
    acentric factor of the mixture, [-]
methods : list, only returned if AvailableMethods == True
    List of methods which can be used to obtain omega with the given inputs

Other Parameters
----------------
Method : string, optional
    The method name to use. Only 'SIMPLE' is accepted so far. All valid
    values are also held in the list omega_mixture_methods.
AvailableMethods : bool, optional
    If True, function will determine which methods can be used to obtain
    omega for the desired chemical, and will return methods instead of omega

Notes
-----
The only data used in the methods implemented to date are mole fractions
and pure-component omegas. An alternate definition could be based on the
dew point or bubble point of a multicomponent mixture, but this has not
been done to date.

References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.
   New York: McGraw-Hill Professional, 2000.
def add_handler(self, handler):
    if not isinstance(handler, EventHandler):
        raise TypeError, "Not an EventHandler"
    with self.lock:
        if handler in self.handlers:
            return
        self.handlers.append(handler)
        self._update_handlers()
Add a handler object.

:Parameters:
    - `handler`: the object providing event handler methods
:Types:
    - `handler`: `EventHandler`
def start(self):
    self.bot_start_time = datetime.now()
    self.webserver = Webserver(self.config['webserver']['host'],
                               self.config['webserver']['port'])
    self.plugins.load()
    self.plugins.load_state()
    self._find_event_handlers()
    self.sc = ThreadedSlackClient(self.config['slack_token'])

    self.always_send_dm = ['_unauthorized_']
    if 'always_send_dm' in self.config:
        self.always_send_dm.extend(
            map(lambda x: '!' + x, self.config['always_send_dm']))

    logging.getLogger('Rocket.Errors.ThreadPool').setLevel(logging.INFO)

    self.is_setup = True
    if self.test_mode:
        self.metrics['startup_time'] = \
            (datetime.now() - self.bot_start_time).total_seconds() * 1000.0
Initializes the bot, plugins, and everything.
def T_dependent_property_derivative(self, T, order=1):
    if self.method:
        if self.test_method_validity(T, self.method):
            try:
                return self.calculate_derivative(T, self.method, order)
            except:
                pass
    sorted_valid_methods = self.select_valid_methods(T)
    for method in sorted_valid_methods:
        try:
            return self.calculate_derivative(T, method, order)
        except:
            pass
    return None
Method to obtain a derivative of a property with respect to temperature, of
a given order. Methods found valid by `select_valid_methods` are attempted
until a method succeeds. If no methods are valid and succeed, None is
returned.

Calls `calculate_derivative` internally to perform the actual calculation.

.. math::
    \text{derivative} = \frac{d (\text{property})}{d T}

Parameters
----------
T : float
    Temperature at which to calculate the derivative, [K]
order : int
    Order of the derivative, >= 1

Returns
-------
derivative : float
    Calculated derivative property, [`units/K^order`]
def usage_palette(parser):
    parser.print_usage()
    print('')
    print('available palettes:')
    for palette in sorted(PALETTE):
        print('  %-12s' % (palette,))
    return 0
Show usage and available palettes.
def _win32_dir(path, star=''):
    from ubelt import util_cmd
    import re
    wrapper = 'cmd /S /C "{}"'
    command = 'dir /-C "{}"{}'.format(path, star)
    wrapped = wrapper.format(command)
    info = util_cmd.cmd(wrapped, shell=True)
    if info['ret'] != 0:
        from ubelt import util_format
        print('Failed command:')
        print(info['command'])
        print(util_format.repr2(info, nl=1))
        raise OSError(str(info))
    # Skip the header and summary lines of the `dir` output.
    lines = info['out'].split('\n')[5:-3]
    splitter = re.compile('( +)')
    for line in lines:
        parts = splitter.split(line)
        date, sep, time, sep, ampm, sep, type_or_size, sep = parts[:8]
        name = ''.join(parts[8:])
        if name == '.' or name == '..':
            continue
        if type_or_size in ['<JUNCTION>', '<SYMLINKD>', '<SYMLINK>']:
            # Extract the link target (in brackets) before truncating
            # the name.
            pos = name.find(':')
            bpos = name[:pos].rfind('[')
            pointed = name[bpos + 1:-1]
            name = name[:bpos - 1]
            yield type_or_size, name, pointed
        else:
            yield type_or_size, name, None
Uses the Windows cmd shell to get information about a directory
def __intermediate_proto(self, interface, address):
    address_proto = address.pop('proto', 'static')
    if 'proto' not in interface:
        return address_proto
    else:
        return interface.pop('proto')
determines UCI interface "proto" option
def send_message(source_jid, password, target_jid, body, subject=None,
                 message_type="chat", message_thread=None, settings=None):
    if sys.version_info.major < 3:
        from locale import getpreferredencoding
        encoding = getpreferredencoding()
        if isinstance(source_jid, str):
            source_jid = source_jid.decode(encoding)
        if isinstance(password, str):
            password = password.decode(encoding)
        if isinstance(target_jid, str):
            target_jid = target_jid.decode(encoding)
        if isinstance(body, str):
            body = body.decode(encoding)
        if isinstance(message_type, str):
            message_type = message_type.decode(encoding)
        if isinstance(message_thread, str):
            message_thread = message_thread.decode(encoding)

    if not isinstance(source_jid, JID):
        source_jid = JID(source_jid)
    if not isinstance(target_jid, JID):
        target_jid = JID(target_jid)

    msg = Message(to_jid=target_jid, body=body, subject=subject,
                  stanza_type=message_type)

    def action(client):
        client.stream.send(msg)

    if settings is None:
        settings = XMPPSettings({"starttls": True, "tls_verify_peer": False})
    if password is not None:
        settings["password"] = password

    handler = FireAndForget(source_jid, action, settings)
    try:
        handler.run()
    except KeyboardInterrupt:
        handler.disconnect()
        raise
Start an XMPP session and send a message, then exit.

:Parameters:
    - `source_jid`: sender JID
    - `password`: sender password
    - `target_jid`: recipient JID
    - `body`: message body
    - `subject`: message subject
    - `message_type`: message type
    - `message_thread`: message thread id
    - `settings`: other settings
:Types:
    - `source_jid`: `pyxmpp2.jid.JID` or `basestring`
    - `password`: `basestring`
    - `target_jid`: `pyxmpp.jid.JID` or `basestring`
    - `body`: `basestring`
    - `subject`: `basestring`
    - `message_type`: `basestring`
    - `settings`: `pyxmpp2.settings.XMPPSettings`
def find_sections(lines):
    sections = []
    for line in lines:
        if is_heading(line):
            sections.append(get_heading(line))
    return sections
Find all section names and return a list with their names.
def memcopy(self, stream, offset=0, length=float("inf")):
    data = [ord(i) for i in list(stream)]
    size = min(length, len(data), self.m_size)
    buff = cast(self.m_buf, POINTER(c_uint8))
    for i in range(size):
        buff[offset + i] = data[i]
Copy stream to buffer
def tell(self, message, sender=no_sender):
    if sender is not no_sender and not isinstance(sender, ActorRef):
        raise ValueError("Sender must be actor reference")
    self._cell.send_message(message, sender)
Send a message to this actor. Asynchronous fire-and-forget.

:param message: The message to send.
:type message: Any

:param sender: The sender of the message. If provided it will be made
    available to the receiving actor via the :attr:`Actor.sender` attribute.
:type sender: :class:`Actor`
def natural_keys(text):
    def atoi(text):
        return int(text) if text.isdigit() else text
    return [atoi(c) for c in re.split(r'(\d+)', text)]
Sort a list of strings with numbers in human ("natural") order.

Examples
--------
>>> l = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im21.jpg', 'im03.jpg', 'im05.jpg']
>>> l.sort(key=tl.files.natural_keys)
['im1.jpg', 'im03.jpg', 'im05', 'im11.jpg', 'im21.jpg', 'im31.jpg']
>>> l.sort() # that is what we don't want
['im03.jpg', 'im05', 'im1.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg']

References
----------
- `link <http://nedbatchelder.com/blog/200712/human_sorting.html>`__
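A self-contained sketch of the sort (assuming `re` is imported at module level, which the function requires):

import re

files = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im03.jpg']
print(sorted(files, key=natural_keys))
# -> ['im1.jpg', 'im03.jpg', 'im11.jpg', 'im31.jpg']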
def build_url(base, seg, query=None):
    def clean_segment(segment):
        segment = segment.strip('/')
        if isinstance(segment, basestring):
            segment = segment.encode('utf-8')
        return segment

    seg = (quote(clean_segment(s)) for s in seg)
    if query is None or len(query) == 0:
        query_string = ''
    else:
        query_string = "?" + urlencode(query)
    path = '/'.join(seg) + query_string
    adjusted_base = base.rstrip('/') + '/'
    return urljoin(str(adjusted_base), str(path))
Create a URL from a list of path segments and an optional dict of query parameters.
def align_and_parse(handle, max_internal_indels=5, is_gbs=False):
    try:
        with open(handle, 'rb') as infile:
            clusts = infile.read().split("//\n//\n")
            clusts = [i for i in clusts if i]
            if not clusts:
                raise IPyradError
    except (IOError, IPyradError):
        LOGGER.debug("skipping empty chunk - {}".format(handle))
        return 0

    highindels = 0

    try:
        aligned = persistent_popen_align3(clusts, 200, is_gbs)
    except Exception as inst:
        LOGGER.debug("Error in handle - {} - {}".format(handle, inst))
        aligned = []

    refined = []
    for clust in aligned:
        filtered = aligned_indel_filter(clust, max_internal_indels)
        if not filtered:
            refined.append(clust)
        else:
            highindels += 1

    if refined:
        outhandle = handle.rsplit(".", 1)[0] + ".aligned"
        with open(outhandle, 'wb') as outfile:
            outfile.write("\n//\n//\n".join(refined) + "\n")

    log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
    if not log_level == "DEBUG":
        os.remove(handle)
    return highindels
much faster implementation for aligning chunks
def convert(self, value, param, ctx):
    resource = tower_cli.get_resource(self.resource_name)
    if value is None:
        return None
    if isinstance(value, int):
        return value
    if re.match(r'^[\d]+$', value):
        return int(value)
    if value == 'null':
        return value
    try:
        debug.log('The %s field is given as a name; '
                  'looking it up.' % param.name, header='details')
        lookup_data = {resource.identity[-1]: value}
        rel = resource.get(**lookup_data)
    except exc.MultipleResults:
        raise exc.MultipleRelatedError(
            'Cannot look up {0} exclusively by name, because multiple {0} '
            'objects exist with that name.\n'
            'Please send an ID. You can get the ID for the {0} you want '
            'with:\n'
            '  tower-cli {0} list --name "{1}"'.format(self.resource_name,
                                                       value),
        )
    except exc.TowerCLIError as ex:
        raise exc.RelatedError('Could not get %s. %s' %
                               (self.resource_name, str(ex)))
    return rel['id']
Return the appropriate integer value. If a non-integer is provided, attempt a name-based lookup and return the primary key.
def verify_roster_set(self, fix=False, settings=None):
    try:
        self._verify((None, u"remove"), fix)
    except ValueError, err:
        raise BadRequestProtocolError(unicode(err))
    if self.ask:
        if fix:
            self.ask = None
        else:
            raise BadRequestProtocolError("'ask' in roster set")
    if self.approved:
        if fix:
            self.approved = False
        else:
            raise BadRequestProtocolError("'approved' in roster set")
    if settings is None:
        settings = XMPPSettings()
    name_length_limit = settings["roster_name_length_limit"]
    if self.name and len(self.name) > name_length_limit:
        raise NotAcceptableProtocolError(u"Roster item name too long")
    group_length_limit = settings["roster_group_name_length_limit"]
    for group in self.groups:
        if not group:
            raise NotAcceptableProtocolError(u"Roster group name empty")
        if len(group) > group_length_limit:
            raise NotAcceptableProtocolError(u"Roster group name too long")
    if self._duplicate_group:
        raise BadRequestProtocolError(u"Item group duplicated")
Check if `self` is a valid roster set item.

For use on a server to validate incoming roster sets.

A valid item must have a proper `subscription` value and a valid value for
'ask'. The lengths of the name and group names must fit the configured
limits.

:Parameters:
    - `fix`: if `True` then replace invalid 'subscription' and 'ask' values
      with the right defaults
    - `settings`: settings object providing the name limits
:Types:
    - `fix`: `bool`
    - `settings`: `XMPPSettings`

:Raise: `BadRequestProtocolError` if the item is invalid.
def _parse_annotations(sbase):
    annotation = {}

    if sbase.isSetSBOTerm():
        annotation["sbo"] = sbase.getSBOTermID()

    cvterms = sbase.getCVTerms()
    if cvterms is None:
        return annotation

    for cvterm in cvterms:
        for k in range(cvterm.getNumResources()):
            uri = cvterm.getResourceURI(k)
            match = URL_IDENTIFIERS_PATTERN.match(uri)
            if not match:
                LOGGER.warning("%s does not conform to "
                               "http(s)://identifiers.org/collection/id",
                               uri)
                continue
            provider, identifier = match.group(1), match.group(2)
            if provider in annotation:
                if isinstance(annotation[provider], string_types):
                    annotation[provider] = [annotation[provider]]
                if identifier not in annotation[provider]:
                    annotation[provider].append(identifier)
            else:
                annotation[provider] = identifier

    return annotation
Parses cobra annotations from a given SBase object.

Annotations are dictionaries with the providers as keys.

Parameters
----------
sbase : libsbml.SBase
    SBase from which the SBML annotations are read

Returns
-------
dict (annotation dictionary)

FIXME: annotation format must be updated (this is a big collection of
fixes) - see: https://github.com/opencobra/cobrapy/issues/684)
def __checkCancelation(self):
    print >> sys.stderr, "reporter:counter:HypersearchWorker,numRecords,50"

    jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['cancel'])[0]
    if jobCancel:
        self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
        self._isCanceled = True
        self._logger.info("Model %s canceled because Job %s was stopped.",
                          self._modelID, self._jobID)
    else:
        stopReason = self._jobsDAO.modelsGetFields(self._modelID,
                                                   ['engStop'])[0]
        if stopReason is None:
            pass
        elif stopReason == ClientJobsDAO.STOP_REASON_KILLED:
            self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED
            self._isKilled = True
            self._logger.info("Model %s canceled because it was killed by "
                              "hypersearch", self._modelID)
        elif stopReason == ClientJobsDAO.STOP_REASON_STOPPED:
            self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED
            self._isCanceled = True
            self._logger.info("Model %s stopped because hypersearch ended",
                              self._modelID)
        else:
            raise RuntimeError("Unexpected stop reason encountered: %s" %
                               (stopReason))
Check if the cancelation flag has been set for this model in the Model DB
def _read_config_file(config_file, verbose):
    config_file = os.path.abspath(config_file)

    if not os.path.exists(config_file):
        raise RuntimeError(
            "Couldn't open configuration file '{}'.".format(config_file))

    if config_file.endswith(".json"):
        with io.open(config_file, mode="r", encoding="utf-8") as json_file:
            minified = jsmin(json_file.read())
        conf = json.loads(minified)
    elif config_file.endswith(".yaml"):
        with io.open(config_file, mode="r", encoding="utf-8") as yaml_file:
            conf = yaml.safe_load(yaml_file)
    else:
        try:
            import imp
            conf = {}
            configmodule = imp.load_source("configuration_module",
                                           config_file)
            for k, v in vars(configmodule).items():
                if k.startswith("__"):
                    continue
                elif isfunction(v):
                    continue
                conf[k] = v
        except Exception:
            exc_type, exc_value = sys.exc_info()[:2]
            exc_info_list = traceback.format_exception_only(exc_type,
                                                            exc_value)
            exc_text = "\n".join(exc_info_list)
            print(
                "Failed to read configuration file: " + config_file +
                "\nDue to " + exc_text,
                file=sys.stderr,
            )
            raise

    conf["_config_file"] = config_file
    return conf
Read configuration file options into a dictionary.
def grep(prev, pattern, *args, **kw):
    inv = False if 'inv' not in kw else kw.pop('inv')
    pattern_obj = re.compile(pattern, *args, **kw)
    for data in prev:
        if bool(inv) ^ bool(pattern_obj.match(data)):
            yield data
The pipe greps the data passed from previous generator according to given
regular expression.

:param prev: The previous iterator of pipe.
:type prev: Pipe
:param pattern: The pattern which used to filter out data.
:type pattern: str|unicode|re pattern object
:param inv: If true, invert the match condition.
:type inv: boolean
:param kw: Extra keyword arguments, passed through to re.compile.
:type kw: dict
:returns: generator
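A minimal usage sketch with hypothetical data, calling the generator directly rather than through whatever pipe framework normally supplies `prev`:

lines = ["error: disk full", "ok", "error: timeout"]
print(list(grep(iter(lines), r"error")))            # -> ['error: disk full', 'error: timeout']
print(list(grep(iter(lines), r"error", inv=True)))  # -> ['ok']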
def sanitize_path(path):
    if path == '/':
        return path
    if path[:1] != '/':
        raise InvalidPath('The path must start with a slash')
    path = re.sub(r'/+', '/', path)
    return path.rstrip('/')
Performs sanitation of the path after validating

:param path: path to sanitize
:return: path
:raises:
    - InvalidPath if the path doesn't start with a slash
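Illustrative calls with hypothetical inputs:

sanitize_path('/a//b///c/')  # -> '/a/b/c'  (duplicate and trailing slashes removed)
sanitize_path('/')           # -> '/'       (root is returned unchanged)
sanitize_path('a/b')         # raises InvalidPath (no leading slash)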
def next_event(self, event_id, prev=False):
    i = self.events.index(self._events_dict[event_id])
    if prev and i > 0:
        return self.events[i - 1]
    elif not prev and i + 1 < len(self.events):
        return self.events[i + 1]
    else:
        return None
Get the event following another event in this conversation.

Args:
    event_id (str): ID of the event.
    prev (bool): If ``True``, return the previous event rather than the
        next event. Defaults to ``False``.

Raises:
    KeyError: If no such :class:`.ConversationEvent` is known.

Returns:
    :class:`.ConversationEvent` or ``None`` if there is no following event.
def read_docs(self, docsfiles):
    updates = DocParser()
    for docsfile in _list(docsfiles):
        if os.path.isfile(docsfile):
            updates.parse(docsfile)
    self.docs.update((k, _docs(updates[k], self.docvars))
                     for k in self.docs if updates.blocks[k])
    for name, text in updates['parameters'].items():
        if name in self:
            self.getparam(name).docs = text[0] % self.docvars
        elif name not in self.ignore:
            raise ValueError("parameter %r does not exist" % name)
Read program documentation from a DocParser compatible file. docsfiles is a list of paths to potential docsfiles: parse if present. A string is taken as a list of one item.
def _ConvertStructMessage(value, message):
    if not isinstance(value, dict):
        raise ParseError(
            'Struct must be in a dict which is {0}.'.format(value))
    for key in value:
        _ConvertValueMessage(value[key], message.fields[key])
    return
Convert a JSON representation into Struct message.
def prune(self, depth=0):
    for n in list(self.nodes):
        if len(n.links) <= depth:
            self.remove_node(n.id)
Removes all nodes whose number of links is less than or equal to depth.
def mmGetMetricSequencesPredictedActiveCellsPerColumn(self):
    self._mmComputeTransitionTraces()

    numCellsPerColumn = []

    for predictedActiveCells in (
            self._mmData["predictedActiveCellsForSequence"].values()):
        cellsForColumn = self.mapCellsToColumns(predictedActiveCells)
        numCellsPerColumn += [len(x) for x in cellsForColumn.values()]

    return Metric(self,
                  "# predicted => active cells per column for each sequence",
                  numCellsPerColumn)
Metric for number of predicted => active cells per column for each sequence @return (Metric) metric
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
    line = clean_lines.elided[linenum]
    match = Search(pattern, line)
    if not match:
        return False

    context = line[0:match.start(1) - 1]
    if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
        return False

    if linenum > 0:
        for i in xrange(linenum - 1, max(0, linenum - 5), -1):
            context = clean_lines.elided[i] + context
    if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
        return False

    if context.endswith(' operator++') or context.endswith(' operator--'):
        return False

    remainder = line[match.end(0):]
    if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
             remainder):
        return False

    error(filename, linenum, 'readability/casting', 4,
          'Using C-style cast. Use %s<%s>(...) instead' %
          (cast_type, match.group(1)))

    return True
Checks for a C-style cast by looking for the pattern.

Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend. This is either
        reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

Returns:
    True if an error was emitted. False otherwise.
def decree(cls, path, concrete_start='', **kwargs):
    try:
        return cls(_make_decree(path, concrete_start), **kwargs)
    except KeyError:
        raise Exception(f'Invalid binary: {path}')
Constructor for Decree binary analysis.

:param str path: Path to binary to analyze
:param str concrete_start: Concrete stdin to use before symbolic input
:param kwargs: Forwarded to the Manticore constructor
:return: Manticore instance, initialized with a Decree State
:rtype: Manticore
def get_termination_stats(self, get_cos=True):
    delta_vals = self._last_vals - self.param_vals
    delta_err = self._last_error - self.error
    frac_err = delta_err / self.error
    to_return = {'delta_vals': delta_vals, 'delta_err': delta_err,
                 'num_iter': 1 * self._num_iter, 'frac_err': frac_err,
                 'error': self.error, 'exp_err': self._exp_err}
    if get_cos:
        model_cosine = self.calc_model_cosine()
        to_return.update({'model_cosine': model_cosine})
    return to_return
Returns a dict of termination statistics

Parameters
----------
get_cos : Bool, optional
    Whether or not to calculate the cosine of the residuals with the
    tangent plane of the model using the current J. The calculation may
    take some time. Default is True

Returns
-------
dict
    Has keys
        delta_vals : The last change in parameter values.
        delta_err  : The last change in the error.
        exp_err    : The expected (last) change in the error.
        frac_err   : The fractional change in the error.
        num_iter   : The number of iterations completed.
        error      : The current error.
def add(reader, writer, column, start, stop, value):
    for i, row in enumerate(reader):
        if i >= start and i <= stop:
            row[column] = type(value)(row[column]) + value
        writer.appendRecord(row)
Adds a value over a range of rows.

Args:
    reader: A FileRecordStream object with input data.
    writer: A FileRecordStream object to write output data to.
    column: The column of data to modify.
    start: The first row in the range to modify.
    stop: The last row in the range to modify.
    value: The value to add.
def add_pfba(model, objective=None, fraction_of_optimum=1.0):
    if objective is not None:
        model.objective = objective
    if model.solver.objective.name == '_pfba_objective':
        raise ValueError('The model already has a pFBA objective.')
    sutil.fix_objective_as_constraint(model, fraction=fraction_of_optimum)
    reaction_variables = ((rxn.forward_variable, rxn.reverse_variable)
                          for rxn in model.reactions)
    variables = chain(*reaction_variables)
    model.objective = model.problem.Objective(
        Zero, direction='min', sloppy=True, name="_pfba_objective")
    model.objective.set_linear_coefficients({v: 1.0 for v in variables})
Add pFBA objective

Add objective to minimize the summed flux of all reactions to the current
objective.

See Also
--------
pfba

Parameters
----------
model : cobra.Model
    The model to add the objective to
objective :
    An objective to set in combination with the pFBA objective.
fraction_of_optimum : float
    Fraction of optimum which must be maintained. The original objective
    reaction is constrained to be greater than maximal_value *
    fraction_of_optimum.
def matchPatterns(patterns, keys):
    results = []
    if patterns:
        for pattern in patterns:
            prog = re.compile(pattern)
            for key in keys:
                if prog.match(key):
                    results.append(key)
    else:
        return None
    return results
Returns a subset of the keys that match any of the given patterns

:param patterns: (list) regular expressions to match
:param keys: (list) keys to search for matches
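Hypothetical calls illustrating the behavior; note that re.match anchors at the start of each key, so a pattern matches any key it prefixes:

keys = ['alpha', 'beta', 'alphabet']
matchPatterns(['alpha'], keys)  # -> ['alpha', 'alphabet']
matchPatterns(None, keys)       # -> None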
def arrow(self, x, y, width, type=NORMAL, draw=True, **kwargs):
    path = self.BezierPath(**kwargs)
    if type == self.NORMAL:
        head = width * .4
        tail = width * .2
        path.moveto(x, y)
        path.lineto(x - head, y + head)
        path.lineto(x - head, y + tail)
        path.lineto(x - width, y + tail)
        path.lineto(x - width, y - tail)
        path.lineto(x - head, y - tail)
        path.lineto(x - head, y - head)
        path.lineto(x, y)
    elif type == self.FORTYFIVE:
        head = .3
        tail = 1 + head
        path.moveto(x, y)
        path.lineto(x, y + width * (1 - head))
        path.lineto(x - width * head, y + width)
        path.lineto(x - width * head, y + width * tail * .4)
        path.lineto(x - width * tail * .6, y + width)
        path.lineto(x - width, y + width * tail * .6)
        path.lineto(x - width * tail * .4, y + width * head)
        path.lineto(x - width, y + width * head)
        path.lineto(x - width * (1 - head), y)
        path.lineto(x, y)
    else:
        raise NameError(_("arrow: available types for arrow() are NORMAL "
                          "and FORTYFIVE\n"))
    if draw:
        path.draw()
    return path
Draw an arrow.

Arrows can be two types: NORMAL or FORTYFIVE.

:param x: top left x-coordinate
:param y: top left y-coordinate
:param width: width of arrow
:param type: NORMAL or FORTYFIVE
:param draw: If True draws arrow immediately
:return: Path object representing the arrow.
def get(self, id):
    info = super(Images, self).get(id)
    return ImageActions(self.api, parent=self, **info)
Get an image by id or slug
def fit(self, X, y=None):
    X = check_array(X)
    self._x_min = X.min(axis=0)
    self._x_max = X.max(axis=0)
    return self
Find min and max values of every feature.

Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
    The training input samples.
y : Ignored
    not used, present for API consistency by convention.

Returns
-------
self : object
def init_options(self):
    self.options = GoogleMapOptions()
    d = self.declaration
    self.set_map_type(d.map_type)
    if d.ambient_mode:
        self.set_ambient_mode(d.ambient_mode)
    if (d.camera_position or d.camera_zoom or
            d.camera_tilt or d.camera_bearing):
        self.update_camera()
    if d.map_bounds:
        self.set_map_bounds(d.map_bounds)
    if not d.show_compass:
        self.set_show_compass(d.show_compass)
    if not d.show_zoom_controls:
        self.set_show_zoom_controls(d.show_zoom_controls)
    if not d.show_toolbar:
        self.set_show_toolbar(d.show_toolbar)
    if d.lite_mode:
        self.set_lite_mode(d.lite_mode)
    if not d.rotate_gestures:
        self.set_rotate_gestures(d.rotate_gestures)
    if not d.scroll_gestures:
        self.set_scroll_gestures(d.scroll_gestures)
    if not d.tilt_gestures:
        self.set_tilt_gestures(d.tilt_gestures)
    if not d.zoom_gestures:
        self.set_zoom_gestures(d.zoom_gestures)
    if d.min_zoom:
        self.set_min_zoom(d.min_zoom)
    if d.max_zoom:
        self.set_max_zoom(d.max_zoom)
Initialize the underlying map options.
def set_bot(self, bot):
    self.bot = bot
    self.sink.set_bot(bot)
Bot must be set before running
def round_any(x, accuracy, f=np.round):
    if not hasattr(x, 'dtype'):
        x = np.asarray(x)
    return f(x / accuracy) * accuracy
Round to multiple of any number.
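Hypothetical examples of rounding to an arbitrary multiple:

import numpy as np

round_any(np.array([1.2, 6.7]), 0.5)        # -> array([1. , 6.5])
round_any(np.array([3, 7]), 5, f=np.floor)  # -> array([0., 5.])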
def read_temple_config():
    with open(temple.constants.TEMPLE_CONFIG_FILE) as temple_config_file:
        return yaml.load(temple_config_file, Loader=yaml.SafeLoader)
Reads the temple YAML configuration file in the repository
def add_metabolites(self, metabolite_list):
    if not hasattr(metabolite_list, '__iter__'):
        metabolite_list = [metabolite_list]
    if len(metabolite_list) == 0:
        return None

    metabolite_list = [x for x in metabolite_list
                       if x.id not in self.metabolites]

    bad_ids = [m for m in metabolite_list
               if not isinstance(m.id, string_types) or len(m.id) < 1]
    if len(bad_ids) != 0:
        raise ValueError('invalid identifiers in {}'.format(repr(bad_ids)))

    for x in metabolite_list:
        x._model = self
    self.metabolites += metabolite_list

    to_add = []
    for met in metabolite_list:
        if met.id not in self.constraints:
            constraint = self.problem.Constraint(
                Zero, name=met.id, lb=0, ub=0)
            to_add += [constraint]

    self.add_cons_vars(to_add)

    context = get_context(self)
    if context:
        context(partial(self.metabolites.__isub__, metabolite_list))
        for x in metabolite_list:
            context(partial(setattr, x, '_model', None))
Will add a list of metabolites to the model object and add new constraints
accordingly.

The change is reverted upon exit when using the model as a context.

Parameters
----------
metabolite_list : A list of `cobra.core.Metabolite` objects
def get_href(self):
    safe = "/" + "!*'()," + "$-_|."
    return compat.quote(
        self.provider.mount_path + self.provider.share_path +
        self.get_preferred_path(),
        safe=safe,
    )
Convert path to a URL that can be passed to XML responses.

Byte string, UTF-8 encoded, quoted.

See http://www.webdav.org/specs/rfc4918.html#rfc.section.8.3
We are using the path-absolute option. i.e. starting with '/'.
URI ; See section 3.2.1 of [RFC2068]
def _addPartitionId(self, index, partitionId=None):
    if partitionId is None:
        self._partitionIdList.append(numpy.inf)
    else:
        self._partitionIdList.append(partitionId)
        indices = self._partitionIdMap.get(partitionId, [])
        indices.append(index)
        self._partitionIdMap[partitionId] = indices
Adds partition id for pattern index
def isValidClass(self, class_):
    module = inspect.getmodule(class_)
    valid = (
        module in self._valid_modules
        or (
            hasattr(module, '__file__')
            and module.__file__ in self._valid_named_modules
        )
    )
    return valid and not private(class_)
Needs to be its own method so it can be called from both wantClass and registerGoodClass.
def _err(self, msg):
    out = '%s%s' % ('[%s] ' % self.description
                    if len(self.description) > 0 else '', msg)
    if self.kind == 'warn':
        print(out)
        return self
    elif self.kind == 'soft':
        global _soft_err
        _soft_err.append(out)
        return self
    else:
        raise AssertionError(out)
Helper to raise an AssertionError, and optionally prepend custom description.
def list_targets_by_rule(client=None, **kwargs):
    result = client.list_targets_by_rule(**kwargs)
    if not result.get("Targets"):
        result.update({"Targets": []})
    return result
Rule='string'
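A hedged usage sketch, assuming a boto3 CloudWatch Events client (the wrapper simply forwards its keyword arguments, such as Rule, to the underlying API call):

import boto3

client = boto3.client('events')
result = list_targets_by_rule(client=client, Rule='my-rule-name')
print(result['Targets'])  # always a list, even when the API response omits the key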
def chirp(t, f0=0., t1=1., f1=100., form='linear', phase=0):
    valid_forms = ['linear', 'quadratic', 'logarithmic']
    if form not in valid_forms:
        raise ValueError("Invalid form. Valid forms are %s" % valid_forms)
    t = numpy.array(t)
    phase = 2. * pi * phase / 360.
    if form == "linear":
        a = pi * (f1 - f0) / t1
        b = 2. * pi * f0
        y = numpy.cos(a * t**2 + b * t + phase)
    elif form == "quadratic":
        a = (2 / 3. * pi * (f1 - f0) / t1 / t1)
        b = 2. * pi * f0
        y = numpy.cos(a * t**3 + b * t + phase)
    elif form == "logarithmic":
        a = 2. * pi * t1 / numpy.log(f1 - f0)
        b = 2. * pi * f0
        x = (f1 - f0)**(1. / t1)
        y = numpy.cos(a * x**t + b * t + phase)
    return y
r"""Evaluate a chirp signal at time t. A chirp signal is a frequency swept cosine wave. .. math:: a = \pi (f_1 - f_0) / t_1 .. math:: b = 2 \pi f_0 .. math:: y = \cos\left( \pi\frac{f_1-f_0}{t_1} t^2 + 2\pi f_0 t + \rm{phase} \right) :param array t: times at which to evaluate the chirp signal :param float f0: frequency at time t=0 (Hz) :param float t1: time t1 :param float f1: frequency at time t=t1 (Hz) :param str form: shape of frequency sweep in ['linear', 'quadratic', 'logarithmic'] :param float phase: phase shift at t=0 The parameter **form** can be: * 'linear' :math:`f(t) = (f_1-f_0)(t/t_1) + f_0` * 'quadratic' :math:`f(t) = (f_1-f_0)(t/t_1)^2 + f_0` * 'logarithmic' :math:`f(t) = (f_1-f_0)^{(t/t_1)} + f_0` Example: .. plot:: :include-source: :width: 80% from spectrum import chirp from pylab import linspace, plot t = linspace(0, 1, 1000) y = chirp(t, form='linear') plot(y) y = chirp(t, form='quadratic') plot(y, 'r')
def get(self, store_id, customer_id, **queryparams):
    self.store_id = store_id
    self.customer_id = customer_id
    return self._mc_client._get(
        url=self._build_path(store_id, 'customers', customer_id),
        **queryparams)
Get information about a specific customer.

:param store_id: The store id.
:type store_id: :py:class:`str`
:param customer_id: The id for the customer of a store.
:type customer_id: :py:class:`str`
:param queryparams: The query string parameters
    queryparams['fields'] = []
    queryparams['exclude_fields'] = []
def get_constants(self, **params: keys):
    url = self.api.CONSTANTS
    return self._get_model(url, **params)
Get the CR Constants

Parameters
----------
\*\*keys: Optional[list] = None
    Filter which keys should be included in the response
\*\*exclude: Optional[list] = None
    Filter which keys should be excluded from the response
\*\*timeout: Optional[int] = None
    Custom timeout that overwrites Client.timeout
def parse_arguments(*args, **options):
    days = options.get('days', 1)
    enterprise_customer_uuid = options.get('enterprise_customer_uuid')
    enterprise_customer = None

    if enterprise_customer_uuid:
        try:
            enterprise_customer = EnterpriseCustomer.objects.get(
                uuid=enterprise_customer_uuid)
        except EnterpriseCustomer.DoesNotExist:
            raise CommandError(
                'Enterprise customer with uuid "{enterprise_customer_uuid}" '
                'does not exist.'.format(
                    enterprise_customer_uuid=enterprise_customer_uuid))

    return days, enterprise_customer
Parse and validate arguments for send_course_enrollments command.

Arguments:
    *args: Positional arguments passed to the command
    **options: optional arguments passed to the command

Returns:
    A tuple containing parsed values for
    1. days (int): Integer showing number of days to lookup enterprise
       enrollments, course completion etc and send to xAPI LRS
    2. enterprise_customer_uuid (EnterpriseCustomer): Enterprise Customer
       if present then send xAPI statements just for this enterprise.
def set_doc_data_lics(self, doc, lics):
    if not self.doc_data_lics_set:
        self.doc_data_lics_set = True
        if validations.validate_data_lics(lics):
            doc.data_license = document.License.from_identifier(lics)
            return True
        else:
            raise SPDXValueError('Document::DataLicense')
    else:
        raise CardinalityError('Document::DataLicense')
Sets the document data license. Raises SPDXValueError if the value is
malformed, CardinalityError if already defined.
def train_model(best_processed_path, weight_path='../weight/model_weight.h5',
                verbose=2):
    x_train_char, x_train_type, y_train = prepare_feature(
        best_processed_path, option='train')
    x_test_char, x_test_type, y_test = prepare_feature(
        best_processed_path, option='test')

    validation_set = False
    if os.path.isdir(os.path.join(best_processed_path, 'val')):
        validation_set = True
        x_val_char, x_val_type, y_val = prepare_feature(
            best_processed_path, option='val')

    if not os.path.isdir(os.path.dirname(weight_path)):
        os.makedirs(os.path.dirname(weight_path))

    callbacks_list = [
        ReduceLROnPlateau(),
        ModelCheckpoint(
            weight_path,
            save_best_only=True,
            save_weights_only=True,
            monitor='val_loss',
            mode='min',
            verbose=1
        )
    ]

    model = get_convo_nn2()
    train_params = [(10, 256), (3, 512), (3, 2048), (3, 4096), (3, 8192)]
    for (epochs, batch_size) in train_params:
        print("train with {} epochs and {} batch size".format(
            epochs, batch_size))
        if validation_set:
            model.fit([x_train_char, x_train_type], y_train,
                      epochs=epochs, batch_size=batch_size,
                      verbose=verbose, callbacks=callbacks_list,
                      validation_data=([x_val_char, x_val_type], y_val))
        else:
            model.fit([x_train_char, x_train_type], y_train,
                      epochs=epochs, batch_size=batch_size,
                      verbose=verbose, callbacks=callbacks_list)

    return model
Given path to processed BEST dataset, train a CNN model for predicting word
beginnings, along with the character label encoder and character type label
encoder

Input
=====
best_processed_path: str, path to processed BEST dataset
weight_path: str, path to weight path file
verbose: int, verbose option for training Keras model

Output
======
model: keras model, keras model for tokenize prediction
def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):
    byte_contents = BytesIO(contents.encode('utf8'))
    info = tarfile.TarInfo(name=name)
    info.size = len(contents)
    info.mtime = 0
    info.type = tarfile.REGTYPE
    info.mode = int(mode, 8)
    tar.addfile(tarinfo=info, fileobj=byte_contents)
Adds a single file to a tarfile instance.

:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
def mongo(daemon=False, port=20771):
    cmd = "mongod --port {0}".format(port)
    if daemon:
        cmd += " --fork"
    run(cmd)
Run the mongod process.
def string_presenter(self, dumper, data):
    if '\n' in data:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data,
                                       style='|')
    else:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data)
Presenter to force yaml.dump to use multi-line string style.
def set_shared_config(cls, config):
    assert isinstance(config, dict)
    cls._sharedInstance.config.update(config)
    if cls._sharedInstance.instance:
        cls._sharedInstance.instance = None
This allows setting a config that will be used when calling
``shared_blockchain_instance``, and allows defining the configuration
without requiring an instance to actually be created
def setup(app):
    lexer = MarkdownLexer()
    for alias in lexer.aliases:
        app.add_lexer(alias, lexer)

    return dict(version=__version__)
Initializer for Sphinx extension API. See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
def _categoryToLabelList(self, category):
    if category is None:
        return []

    labelList = []
    labelNum = 0
    while category > 0:
        if category % 2 == 1:
            labelList.append(self.saved_categories[labelNum])
        labelNum += 1
        category = category >> 1
    return labelList
Converts a category number into a list of labels
def scale(self, w=1.0, h=1.0):
    from types import FloatType
    w0, h0 = self.img.size
    if type(w) == FloatType:
        w = int(w * w0)
    if type(h) == FloatType:
        h = int(h * h0)
    self.img = self.img.resize((w, h), INTERPOLATION)
    self.w = w
    self.h = h
Resizes the layer to the given width and height. When width w or height h
is a floating-point number, scales proportionally as a fraction of the
current size; otherwise scales to the given size in pixels.
def namespace_array(ns_key):
    obs_sch = namespace(ns_key)
    obs_sch['title'] = 'Observation'
    sch = copy.deepcopy(JAMS_SCHEMA['definitions']['SparseObservationList'])
    sch['items'] = obs_sch
    return sch
Construct a validation schema for arrays of a given namespace.

Parameters
----------
ns_key : str
    Namespace key identifier

Returns
-------
schema : dict
    JSON schema of `namespace` observation arrays
def rssi(self, timeout_sec=TIMEOUT_SEC):
    self._rssi_read.clear()
    self._peripheral.readRSSI()
    if not self._rssi_read.wait(timeout_sec):
        raise RuntimeError('Exceeded timeout waiting for RSSI value!')
    return self._rssi
Return the RSSI signal strength in decibels.
def fit(self, x, y=None):
    if self._dtype is not None:
        iter2array(x, dtype=self._dtype)
    else:
        iter2array(x)
    return self
Do nothing and return the estimator unchanged.

This method is just there to implement the usual API and hence work in
pipelines.
def can_reach(self, node, traversable=lambda node, edge: True):
    if isinstance(node, str):
        node = self.graph[node]
    for n in self.graph.nodes:
        n._visited = False
    return proximity.depth_first_search(
        self,
        visit=lambda n: node == n,
        traversable=traversable
    )
Returns True if given node can be reached over traversable edges. To enforce edge direction, use a node==edge.node1 traversable.
def nexmake(mdict, nlocus, dirs, mcmc_burnin, mcmc_ngen, mcmc_sample_freq):
    max_name_len = max([len(i) for i in mdict])
    namestring = "{:<" + str(max_name_len + 1) + "} {}\n"
    matrix = ""
    for i in mdict.items():
        matrix += namestring.format(i[0], i[1])

    handle = os.path.join(dirs, "{}.nex".format(nlocus))
    with open(handle, 'w') as outnex:
        outnex.write(NEXBLOCK.format(**{
            "ntax": len(mdict),
            "nchar": len(mdict.values()[0]),
            "matrix": matrix,
            "ngen": mcmc_ngen,
            "sfreq": mcmc_sample_freq,
            "burnin": mcmc_burnin,
        }))
function that takes a dictionary mapping names to sequences, and a locus number, and writes it as a NEXUS file with a mrbayes analysis block.
def is_oct(ip):
    try:
        dec = int(str(ip), 8)
    except (TypeError, ValueError):
        return False
    if dec > 0o37777777777 or dec < 0:
        return False
    return True
Return true if the IP address is in octal notation.
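Hypothetical checks (0o37777777777 is 2**32 - 1, the largest value representable as an IPv4 address):

is_oct('030052000001')  # True: valid octal digits, within the 32-bit range
is_oct('40000000000')   # False: exceeds 0o37777777777
is_oct('192.168.0.1')   # False: not a valid octal literal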
def load_values(self):
    for config_name, evar in self.evar_defs.items():
        if evar.is_required and evar.name not in os.environ:
            raise RuntimeError((
                "Missing required environment variable: {evar_name}\n"
                "{help_txt}"
            ).format(evar_name=evar.name, help_txt=evar.help_txt))
        if evar.name in os.environ:
            self[config_name] = os.environ.get(evar.name)
        else:
            self[config_name] = evar.default_val
        for filter in evar.filters:
            current_val = self.get(config_name)
            new_val = filter(current_val, evar)
            self[config_name] = new_val
    self._filter_all()
Go through the env var map, transferring the values to this object as attributes. :raises: RuntimeError if a required env var isn't defined.
def parse_typing_status_message(p):
    return TypingStatusMessage(
        conv_id=p.conversation_id.id,
        user_id=from_participantid(p.sender_id),
        timestamp=from_timestamp(p.timestamp),
        status=p.type,
    )
Return TypingStatusMessage from hangouts_pb2.SetTypingNotification. The same status may be sent multiple times consecutively, and when a message is sent the typing status will not change to stopped.
def handle_extracted_license(self, extr_lic):
    lic = self.parse_only_extr_license(extr_lic)
    if lic is not None:
        self.doc.add_extr_lic(lic)
    return lic
Build and return an ExtractedLicense or None. Note that this function adds the license to the document.
def write(self, text, hashline=b"# {}"):
    if not text.endswith(b"\n"):
        text += b"\n"
    actual_hash = hashlib.sha1(text).hexdigest()

    with open(self.filename, "wb") as f:
        f.write(text)
        f.write(hashline.decode("utf8").format(actual_hash).encode("utf8"))
        f.write(b"\n")
u""" Write `text` to the file. Writes the text to the file, with a final line checksumming the contents. The entire file must be written with one `.write()` call. The last line is written with the `hashline` format string, which can be changed to accommodate different file syntaxes. Both arguments are UTF8 byte strings. Arguments: text (UTF8 byte string): the contents of the file to write. hashline (UTF8 byte string): the format of the last line to append to the file, with "{}" replaced with the hash.
def start_server(self, datacenter_id, server_id):
    response = self._perform_request(
        url='/datacenters/%s/servers/%s/start' % (
            datacenter_id, server_id),
        method='POST-ACTION')

    return response
Starts the server.

:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
def level_chunker(text, getreffs, level=1):
    references = getreffs(level=level)
    return [(ref.split(":")[-1], ref.split(":")[-1]) for ref in references]
Chunk a text at the passage level

:param text: Text object
:type text: MyCapytains.resources.text.api
:param getreffs: Callback function to retrieve text
:type getreffs: function(level)
:return: List of urn references with their human readable version
:rtype: [(str, str)]
def calc_J(self):
    del self.J
    self.J = np.zeros([self.param_vals.size, self.data.size])
    dp = np.zeros_like(self.param_vals)
    f0 = self.model.copy()
    for a in range(self.param_vals.size):
        dp *= 0
        dp[a] = self.dl[a]
        f1 = self.func(self.param_vals + dp, *self.func_args,
                       **self.func_kwargs)
        grad_func = (f1 - f0) / dp[a]
        self.J[a] = -grad_func
Updates self.J, returns nothing
def repolist(status='', media=None):
    manager = MANAGER
    with settings(hide('running', 'stdout')):
        if media:
            repos = run_as_root(
                "%(manager)s repolist %(status)s | sed '$d' | "
                "sed -n '/repo id/,$p'" % locals())
        else:
            repos = run_as_root(
                "%(manager)s repolist %(status)s | sed '/Media\\|Debug/d' | "
                "sed '$d' | sed -n '/repo id/,$p'" % locals())
    return [line.split(' ')[0] for line in repos.splitlines()[1:]]
Get the list of ``yum`` repositories.

Returns enabled repositories by default. Extra *status* may be passed to
list disabled repositories if necessary.

Media and debug repositories are kept disabled, except if you pass *media*.

::

    import burlap

    # Install a package that may be included in disabled repositories
    burlap.rpm.install('vim', burlap.rpm.repolist('disabled'))
def invalidate_ip(self, ip):
    if self._use_cache:
        key = self._make_cache_key(ip)
        self._cache.delete(key, version=self._cache_version)
Invalidate httpBL cache for IP address

:param ip: ipv4 IP address
def identify_phase(T, P, Tm=None, Tb=None, Tc=None, Psat=None):
    if Tm and T <= Tm:
        return 's'
    elif Tc and T >= Tc:
        return 'g'
    elif Psat:
        if P <= Psat:
            return 'g'
        elif P > Psat:
            return 'l'
    elif Tb:
        if 9E4 < P < 1.1E5:
            if T < Tb:
                return 'l'
            else:
                return 'g'
        elif P > 1.1E5 and T <= Tb:
            return 'l'
        else:
            return None
    else:
        return None
Determines the phase of a one-species chemical system according to basic
rules, using whatever information is available. Considers only the phases
liquid, solid, and gas; does not consider two-phase scenarios, as occur
between phase boundaries.

* If the melting temperature is known and the temperature is under or equal
  to it, consider it a solid.
* If the critical temperature is known and the temperature is greater or
  equal to it, consider it a gas.
* If the vapor pressure at `T` is known and the pressure is under or equal
  to it, consider it a gas. If the pressure is greater than the vapor
  pressure, consider it a liquid.
* If the melting temperature, critical temperature, and vapor pressure are
  not known, attempt to use the boiling point to provide phase information.
  If the pressure is between 90 kPa and 110 kPa (approximately normal),
  consider it a liquid if it is under the boiling temperature and a gas if
  above the boiling temperature.
* If the pressure is above 110 kPa and the boiling temperature is known,
  consider it a liquid if the temperature is under the boiling temperature.
* Return None otherwise.

Parameters
----------
T : float
    Temperature, [K]
P : float
    Pressure, [Pa]
Tm : float, optional
    Normal melting temperature, [K]
Tb : float, optional
    Normal boiling point, [K]
Tc : float, optional
    Critical temperature, [K]
Psat : float, optional
    Vapor pressure of the fluid at `T`, [Pa]

Returns
-------
phase : str
    Either 's', 'l', 'g', or None if the phase cannot be determined

Notes
-----
No special attention is paid to any phase transition. For the case where
the melting point is not provided, the possibility of the fluid being solid
is simply ignored.

Examples
--------
>>> identify_phase(T=280, P=101325, Tm=273.15, Psat=991)
'l'
def add_droplets(self, droplet_ids):
    return self.get_data(
        "load_balancers/%s/droplets/" % self.id,
        type=POST,
        params={"droplet_ids": droplet_ids}
    )
Assign a LoadBalancer to a Droplet.

Args:
    droplet_ids (obj:`list` of `int`): A list of Droplet IDs
def scan(xml):
    if xml.tag is et.Comment:
        yield {'type': COMMENT, 'text': xml.text}
        return

    if xml.tag is et.PI:
        if xml.text:
            yield {'type': PI, 'target': xml.target, 'text': xml.text}
        else:
            yield {'type': PI, 'target': xml.target}
        return

    obj = _elt2obj(xml)
    obj['type'] = ENTER
    yield obj
    assert type(xml.tag) is str, xml

    if xml.text:
        yield {'type': TEXT, 'text': xml.text}

    for c in xml:
        for x in scan(c):
            yield x
        if c.tail:
            yield {'type': TEXT, 'text': c.tail}

    yield {'type': EXIT}
Converts XML tree to event generator
def request_configuration_form(self):
    iq = Iq(to_jid=self.room_jid.bare(), stanza_type="get")
    iq.new_query(MUC_OWNER_NS, "query")
    self.manager.stream.set_response_handlers(
        iq, self.process_configuration_form_success,
        self.process_configuration_form_error)
    self.manager.stream.send(iq)
    return iq.get_id()
Request a configuration form for the room.

When the form is received `self.handler.configuration_form_received` will
be called. When an error response is received then `self.handler.error`
will be called.

:return: id of the request stanza.
:returntype: `unicode`
def _find_any(self, task_spec):
    tasks = []
    if self.task_spec == task_spec:
        tasks.append(self)
    for child in self:
        if child.task_spec != task_spec:
            continue
        tasks.append(child)
    return tasks
Returns any descendants that have the given task spec assigned.

:type task_spec: TaskSpec
:param task_spec: The wanted task spec.
:rtype: list(Task)
:returns: The task objects that are attached to the given task spec.
def autoset_settings(set_var):
    try:
        devices = ast.literal_eval(os.environ["CUDA_VISIBLE_DEVICES"])
        if type(devices) != list and type(devices) != tuple:
            devices = [devices]
        if len(devices) != 0:
            set_var.GPU = len(devices)
            set_var.NB_JOBS = len(devices)
            warnings.warn("Detecting CUDA devices : {}".format(devices))
    except KeyError:
        set_var.GPU = check_cuda_devices()
        set_var.NB_JOBS = set_var.GPU
        warnings.warn("Detecting {} CUDA devices.".format(set_var.GPU))

    if not set_var.GPU:
        warnings.warn("No GPU automatically detected. Setting SETTINGS.GPU "
                      "to 0, and SETTINGS.NB_JOBS to cpu_count.")
        set_var.GPU = 0
        set_var.NB_JOBS = multiprocessing.cpu_count()

    return set_var
Autoset GPU parameters using the CUDA_VISIBLE_DEVICES variable.
Return the default config if the variable is not set.

:param set_var: Variable to set. Must be of type ConfigSettings
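A hedged sketch of how this might be called; `SETTINGS` stands for an existing ConfigSettings instance, and the environment value is illustrative.

import os

# Illustrative: pretend two CUDA devices are visible.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
settings = autoset_settings(SETTINGS)   # SETTINGS: an existing ConfigSettings
print(settings.GPU, settings.NB_JOBS)   # -> 2 2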
def configure(level=logging.INFO, logfile=None):
    for handler in Log.handlers:
        if isinstance(handler, logging.StreamHandler):
            Log.handlers.remove(handler)
    Log.setLevel(level)
    if logfile is not None:
        log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
        formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
        file_handler = logging.FileHandler(logfile)
        file_handler.setFormatter(formatter)
        Log.addHandler(file_handler)
    else:
        log_format = "[%(asctime)s] %(log_color)s[%(levelname)s]%(reset)s: %(message)s"
        formatter = colorlog.ColoredFormatter(fmt=log_format, datefmt=date_format)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        Log.addHandler(stream_handler)
Configure the logger, which dumps logs to the terminal or a file.

:param level: logging level: info, warning, verbose...
:type level: logging level
:param logfile: log file name, default to None
:type logfile: string
:return: None
:rtype: None
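A minimal usage sketch; "app.log" is a hypothetical path.

import logging

configure()                                        # colored terminal output at INFO
configure(level=logging.DEBUG, logfile="app.log")  # plain-format file output at DEBUG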
def register_chooser(self, chooser, **kwargs):
    if not issubclass(chooser, Chooser):
        return self.register_simple_chooser(chooser, **kwargs)
    self.choosers[chooser.model] = chooser(**kwargs)
    return chooser
Adds a model chooser definition to the registry.
def get_compound_afrs(self):
    result = self._compound_mfrs * 1.0
    for compound in self.material.compounds:
        index = self.material.get_compound_index(compound)
        result[index] = stoich.amount(compound, result[index])
    return result
Determine the amount flow rates of all the compounds.

:returns: List of amount flow rates. [kmol/h]
def show(self):
    self.parent.addLayout(self._logSelectLayout)
    self.menuCount += 1
    self._connectSlots()
Display menus and connect event signals.
def sys_openat(self, dirfd, buf, flags, mode):
    filename = self.current.read_string(buf)
    dirfd = ctypes.c_int32(dirfd).value
    if os.path.isabs(filename) or dirfd == self.FCNTL_FDCWD:
        return self.sys_open(buf, flags, mode)
    try:
        dir_entry = self._get_fd(dirfd)
    except FdError as e:
        logger.info("openat: Not valid file descriptor. Returning EBADF")
        return -e.err
    if not isinstance(dir_entry, Directory):
        logger.info("openat: Not directory descriptor. Returning ENOTDIR")
        return -errno.ENOTDIR
    dir_path = dir_entry.name
    filename = os.path.join(dir_path, filename)
    try:
        f = self._sys_open_get_file(filename, flags)
        logger.debug(f"Opening file {filename} for real fd {f.fileno()}")
    except IOError as e:
        logger.info(f"Could not open file {filename}. Reason: {e!s}")
        return -e.errno if e.errno is not None else -errno.EINVAL
    return self._open(f)
Openat system call - similar to the open system call, except for the dirfd
argument: when the path contained in buf is relative, it is resolved
relative to the directory referred to by dirfd. The special value AT_FDCWD
for dirfd resolves the path relative to the current working directory.

:param dirfd: directory file descriptor the relative path in buf is resolved against
:param buf: address of zero-terminated pathname
:param flags: file access bits
:param mode: file permission mode
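For reference, the host-side openat() semantics this emulates can be demonstrated with Python's own os module; a sketch for POSIX systems, with hypothetical paths.

import os

# openat() semantics via os.open's dir_fd parameter (POSIX only).
dfd = os.open("/tmp", os.O_RDONLY)  # directory file descriptor
fd = os.open("example.txt", os.O_CREAT | os.O_WRONLY, 0o644, dir_fd=dfd)
# 'example.txt' was resolved relative to /tmp, i.e. /tmp/example.txt
os.close(fd)
os.close(dfd)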
def marketYesterdayDF(token='', version=''):
    x = marketYesterday(token, version)
    data = []
    for key in x:
        data.append(x[key])
        data[-1]['symbol'] = key
    df = pd.DataFrame(data)
    _toDatetime(df)
    _reindex(df, 'symbol')
    return df
This returns previous day adjusted price data for the whole market

https://iexcloud.io/docs/api/#previous-day-prices
Available after 4am ET Tue-Sat

Args:
    token (string); Access token
    version (string); API version

Returns:
    DataFrame: result
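A hedged usage sketch; the token value is a placeholder, not a real credential.

# Placeholder token; returns a DataFrame indexed by symbol.
df = marketYesterdayDF(token="YOUR_IEX_TOKEN")
print(df.head())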
def getNextRecord(self, useCache=True):
    assert self._file is not None
    assert self._mode == self._FILE_READ_MODE
    try:
        line = self._reader.next()
    except StopIteration:
        if self.rewindAtEOF:
            if self._recordCount == 0:
                raise Exception("The source configured to reset at EOF but "
                                "'%s' appears to be empty" % self._filename)
            self.rewind()
            line = self._reader.next()
        else:
            return None
    self._recordCount += 1
    record = []
    for i, f in enumerate(line):
        if f in self._missingValues:
            record.append(SENTINEL_VALUE_FOR_MISSING_DATA)
        else:
            record.append(self._adapters[i](f))
    return record
Returns next available data record from the file.

:returns: a data row (a list or tuple) if available; None, if no more
          records in the table (End of Stream - EOS); empty sequence (list
          or tuple) when timing out while waiting for the next record.
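A hedged reading-loop sketch; `stream` stands for an already-opened instance of this record-stream class and `process` is a hypothetical consumer.

# 'stream' is an already-opened instance of this record-stream class.
while True:
    record = stream.getNextRecord()
    if record is None:   # End of Stream (EOS)
        break
    process(record)      # 'process' is a hypothetical consumer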
def dataReceived(self, data):
    self.resetTimeout()
    lines = (self._buffer + data).splitlines()
    if data.endswith(b'\n') or data.endswith(b'\r'):
        self._buffer = b''
    else:
        self._buffer = lines.pop(-1)
    for line in lines:
        if self.transport.disconnecting:
            return
        if len(line) > self._max_length:
            self.lineLengthExceeded(line)
            return
        else:
            self.lineReceived(line)
    if len(self._buffer) > self._max_length:
        self.lineLengthExceeded(self._buffer)
        return
Translates bytes into lines, and calls lineReceived. Copied from ``twisted.protocols.basic.LineOnlyReceiver`` but using str.splitlines() to split on ``\r\n``, ``\n``, and ``\r``.
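The delimiter handling relies on bytes.splitlines(); a quick standalone illustration:

# splitlines() treats \r\n, \n, and \r all as line boundaries:
buf = b"alpha\r\nbeta\ngamma\rpartial"
print(buf.splitlines())  # [b'alpha', b'beta', b'gamma', b'partial']
# The trailing 'partial' has no terminator, so dataReceived buffers it.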
def extracted_array_2d_from_array_2d_and_coordinates(array_2d, y0, y1, x0, x1):
    new_shape = (y1 - y0, x1 - x0)
    resized_array = np.zeros(shape=new_shape)
    for y_resized, y in enumerate(range(y0, y1)):
        for x_resized, x in enumerate(range(x0, x1)):
            resized_array[y_resized, x_resized] = array_2d[y, x]
    return resized_array
Resize an array to a new size by extracting a sub-set of the array.

The extracted input coordinates use NumPy convention, such that the upper
values should be specified as +1 the dimensions of the extracted array.

In the example below, an array of size (5,5) is extracted using the
coordinates y0=1, y1=4, x0=1, x1=4. This extracts an array of dimensions
(3,3) and is equivalent to array_2d[1:4, 1:4]

Parameters
----------
array_2d : ndarray
    The 2D array that the sub-array is extracted from.
y0 : int
    The lower row number (e.g. the higher y-coordinate) of the array that
    is extracted for the resize.
y1 : int
    The upper row number (e.g. the lower y-coordinate) of the array that
    is extracted for the resize.
x0 : int
    The lower column number (e.g. the lower x-coordinate) of the array
    that is extracted for the resize.
x1 : int
    The upper column number (e.g. the higher x-coordinate) of the array
    that is extracted for the resize.

Returns
-------
ndarray
    The extracted 2D array from the input 2D array.

Examples
--------
array_2d = np.ones((5,5))
extracted_array = extract_array_2d(array_2d=array_2d, y0=1, y1=4, x0=1, x1=4)
def shuffle_cols(seqarr, newarr, cols):
    for idx in xrange(cols.shape[0]):
        newarr[:, idx] = seqarr[:, cols[idx]]
    return newarr
Used in bootstrap resampling when no map file is provided.
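A tiny self-contained sketch of the intended call (note the function above uses Python 2's xrange); the arrays are illustrative.

import numpy as np

# Resample columns of a (2, 4) array with replacement (illustrative data).
seqarr = np.arange(8).reshape(2, 4)
cols = np.random.randint(0, 4, size=4)  # sampled column indices
newarr = np.zeros_like(seqarr)
shuffled = shuffle_cols(seqarr, newarr, cols)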
def create_checksum_node(self, chksum):
    chksum_node = BNode()
    type_triple = (chksum_node, RDF.type, self.spdx_namespace.Checksum)
    self.graph.add(type_triple)
    algorithm_triple = (chksum_node, self.spdx_namespace.algorithm,
                        Literal(chksum.identifier))
    self.graph.add(algorithm_triple)
    value_triple = (chksum_node, self.spdx_namespace.checksumValue,
                    Literal(chksum.value))
    self.graph.add(value_triple)
    return chksum_node
Return a node representing spdx.checksum.