def uuid(self, type, val):
    picker = lambda x: x.get('uuid', x)
    return self._get((type, val), picker)
Return the item-uuid for an identifier
def fail_hosts(self, hosts_to_fail, activated_count=None):
    if not hosts_to_fail:
        return

    activated_count = activated_count or len(self.activated_hosts)

    logger.debug('Failing hosts: {0}'.format(', '.join(
        (host.name for host in hosts_to_fail),
    )))

    self.active_hosts -= hosts_to_fail
    active_hosts = self.active_hosts

    if not active_hosts:
        raise PyinfraError('No hosts remaining!')

    if self.config.FAIL_PERCENT is not None:
        percent_failed = (
            1 - len(active_hosts) / activated_count
        ) * 100
        if percent_failed > self.config.FAIL_PERCENT:
            raise PyinfraError('Over {0}% of hosts failed ({1}%)'.format(
                self.config.FAIL_PERCENT,
                int(round(percent_failed)),
            ))
Flag a ``set`` of hosts as failed, erroring if ``config.FAIL_PERCENT`` is exceeded.
def Initialize(self):
    super(GRRFlow, self).Initialize()
    self._client_version = None
    self._client_os = None
    self._client_knowledge_base = None

    if "r" in self.mode:
        state = self.Get(self.Schema.FLOW_STATE_DICT)
        self.context = self.Get(self.Schema.FLOW_CONTEXT)
        self.runner_args = self.Get(self.Schema.FLOW_RUNNER_ARGS)
        args = self.Get(self.Schema.FLOW_ARGS)
        if args:
            self.args = args.payload

        if state:
            self.state = AttributedDict(state.ToDict())
        else:
            self.state = AttributedDict()

        self.Load()

    if self.state is None:
        self.state = AttributedDict()
The initialization method.
def _delay(self):
    if not self.next_scheduled:
        self.next_scheduled = self.clock_func() + self.interval
        return
    while True:
        current = self.clock_func()
        if current >= self.next_scheduled:
            extratime = current - self.next_scheduled
            self.next_scheduled = current + self.interval - extratime
            return
        delay_amt = self.next_scheduled - current
        if self.allow_negative_sleep or delay_amt >= 0:
            self.sleep_func(self.next_scheduled - current)
Delay for between zero and self.interval time units
def OnCellTextRotation(self, event):
    with undo.group(_("Rotation")):
        self.grid.actions.toggle_attr("angle")

    self.grid.ForceRefresh()
    self.grid.update_attribute_toolbar()

    if is_gtk():
        try:
            wx.Yield()
        except:
            pass

    event.Skip()
Cell text rotation event handler
def filter_inactive_ports(query):
    port_model = models_v2.Port
    query = (query
             .filter(port_model.status == n_const.PORT_STATUS_ACTIVE))
    return query
Filter ports that aren't in active status
def _irc_upper(self, in_string):
    conv_string = self._translate(in_string)
    if self._upper_trans is not None:
        conv_string = in_string.translate(self._upper_trans)
    return str.upper(conv_string)
Convert us to our upper-case equivalent, given our std.
def drop_all(self):
    log.info('dropping tables in %s', self.engine.url)
    self.session.commit()
    models.Base.metadata.drop_all(self.engine)
    self.session.commit()
Drops all tables in the database
def lookup_announce_alias(name):
    for alias, urls in announce.items():
        if alias.lower() == name.lower():
            return alias, urls
    raise KeyError("Unknown alias %s" % (name,))
Get canonical alias name and announce URL list for the given alias.
def parse_client_cert_pair(config_value):
    if not config_value:
        return
    client_cert = config_value.split(':')
    if len(client_cert) != 2:
        tips = ('client_cert should be formatted like '
                '"/path/to/cert.pem:/path/to/key.pem"')
        raise ValueError('{0!r} is invalid.\n{1}'.format(config_value, tips))
    return tuple(client_cert)
Parses the client cert pair from a config item.

:param config_value: the string value of the config item.
:returns: tuple or None.
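A quick usage sketch, assuming the parse_client_cert_pair above is in scope (the paths are placeholders):

    pair = parse_client_cert_pair('/path/to/cert.pem:/path/to/key.pem')
    print(pair)  # ('/path/to/cert.pem', '/path/to/key.pem')
    parse_client_cert_pair('')              # falsy input returns None
    parse_client_cert_pair('one-part')      # raises ValueError with the format tip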
def apply(self, node):
    new_node = self.run(node)
    return self.update, new_node
Apply transformation and return if an update happened.
def _check_classes(self):
    try:
        self.classes = self.imdbs[0].classes
        self.num_classes = len(self.classes)
    except AttributeError:
        pass

    if self.num_classes > 0:
        for db in self.imdbs:
            assert self.classes == db.classes, "Multiple imdb must have same classes"
Check input imdbs and make sure they have the same classes
def _list_paths(self, bucket, prefix):
    s3 = self.s3
    kwargs = {"Bucket": bucket, "Prefix": prefix}
    if self.list_objects:
        list_objects_api = "list_objects"
    else:
        list_objects_api = "list_objects_v2"
    paginator = s3.get_paginator(list_objects_api)
    for page in paginator.paginate(**kwargs):
        contents = page.get("Contents", None)
        if not contents:
            continue
        for item in contents:
            yield item["Key"]
Read config for list object api, paginate through list objects.
def is_asdf(raw):
    reverse = raw[::-1]
    asdf = ''.join(ASDF)
    return raw in asdf or reverse in asdf
Check whether the password follows keyboard layout order (forwards or backwards).
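A self-contained sketch of the same check; the contents of the ASDF constant here are an assumption (home-row keys in layout order), not the library's actual table:

    ASDF = list("asdfghjkl;")  # assumed home-row sequence, for illustration only

    def is_asdf(raw):
        reverse = raw[::-1]
        asdf = ''.join(ASDF)
        return raw in asdf or reverse in asdf

    print(is_asdf("asdf"), is_asdf("lkjh"), is_asdf("hello"))  # True True False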
def get_vcl_html(self, service_id, version_number, name):
    content = self._fetch("/service/%s/version/%d/vcl/%s/content"
                          % (service_id, version_number, name))
    return content.get("content", None)
Get the uploaded VCL for a particular service and version with HTML syntax highlighting.
def sideral(date, longitude=0., model='mean', eop_correction=True, terms=106):
    theta = _sideral(date, longitude, model, eop_correction, terms)
    return rot3(np.deg2rad(-theta))
Sideral time as a rotation matrix
def _setDefaults(self):
    for configName, configDict in self.configs.items():
        self._setConfig(configName, configDict['default'])
Sets all the expected configuration options on the config object as either the requested default value, or None.
def update(uid, post_data):
    raw_rec = TabTag.get(TabTag.uid == uid)
    entry = TabTag.update(
        name=post_data['name'] if 'name' in post_data else raw_rec.name,
        slug=post_data['slug'] if 'slug' in post_data else raw_rec.slug,
        order=post_data['order'] if 'order' in post_data else raw_rec.order,
        kind=post_data['kind'] if 'kind' in post_data else raw_rec.kind,
        pid=post_data['pid'],
    ).where(TabTag.uid == uid)
    entry.execute()
Update the category.
def save_and_restore(self, _func=None, **config_values):
    functools = self._modules['functools']

    if not _func:
        return functools.partial(self.save_and_restore, **config_values)

    @functools.wraps(_func)
    def _saving_wrapper(*args, **kwargs):
        saved_config = dict(self._loaded_values)
        try:
            self.load_from_dict(config_values)
            return _func(*args, **kwargs)
        finally:
            self._loaded_values = saved_config

    return _saving_wrapper
Decorator for saving conf state and restoring it after a function.

This decorator is primarily for use in tests, where conf keys may be updated
for individual test cases, but those values need to be reverted after the
test case is done.

Examples:

  conf.declare('my_conf_key')

  @conf.save_and_restore
  def MyTestFunc():
    conf.load(my_conf_key='baz')
    SomeFuncUnderTestThatUsesMyConfKey()

  conf.load(my_conf_key='foo')
  MyTestFunc()
  print conf.my_conf_key  # Prints 'foo', *NOT* 'baz'

  # Without the save_and_restore decorator, MyTestFunc() would have had the
  # side effect of altering the conf value of 'my_conf_key' to 'baz'.

  # Config keys can also be initialized for the context inline at decoration
  # time.  This is the same as setting them at the beginning of the
  # function, but is a little clearer syntax if you know ahead of time what
  # config keys and values you need to set.

  @conf.save_and_restore(my_conf_key='baz')
  def MyOtherTestFunc():
    print conf.my_conf_key  # Prints 'baz'

  MyOtherTestFunc()
  print conf.my_conf_key  # Prints 'foo' again, for the same reason.

Args:
  _func: The function to wrap.  The returned wrapper will invoke the
      function and restore the config to the state it was in at invocation.
  **config_values: Config keys can be set inline at decoration time, see
      examples.  Note that config keys can't begin with underscore, so
      there can be no name collision with _func.

Returns:
  Wrapper to replace _func, as per Python decorator semantics.
def go_to_next_cell(self):
    cursor = self.textCursor()
    cursor.movePosition(QTextCursor.NextBlock)
    cur_pos = prev_pos = cursor.position()
    while not self.is_cell_separator(cursor):
        cursor.movePosition(QTextCursor.NextBlock)
        prev_pos = cur_pos
        cur_pos = cursor.position()
        if cur_pos == prev_pos:
            return
    self.setTextCursor(cursor)
Go to the next cell of lines
def get_tensors(object_):
    if torch.is_tensor(object_):
        return [object_]
    elif isinstance(object_, (str, float, int)):
        return []

    tensors = set()

    if isinstance(object_, collections.abc.Mapping):
        for value in object_.values():
            tensors.update(get_tensors(value))
    elif isinstance(object_, collections.abc.Iterable):
        for value in object_:
            tensors.update(get_tensors(value))
    else:
        members = [
            value for key, value in inspect.getmembers(object_)
            if not isinstance(value, (collections.abc.Callable, type(None)))
        ]
        tensors.update(get_tensors(members))

    return tensors
Get all tensors associated with ``object_``

Args:
    object_ (any): Any object to look for tensors.

Returns:
    (list of torch.tensor): List of tensors that are associated with ``object_``.
def validate_description(xml_data):
    try:
        root = ET.fromstring('<document>' + xml_data + '</document>')
    except StdlibParseError as e:
        raise ParseError(str(e))
    return _parse_desc(root)
Validate the description
def formatfooter(self, previous_month, next_month):
    footer = '<tfoot><tr>' \
             '<td colspan="3" class="prev">%s</td>' \
             '<td class="pad">&nbsp;</td>' \
             '<td colspan="3" class="next">%s</td>' \
             '</tr></tfoot>'
    if previous_month:
        previous_content = '<a href="%s" class="previous-month">%s</a>' % (
            reverse('zinnia:entry_archive_month', args=[
                previous_month.strftime('%Y'),
                previous_month.strftime('%m')]),
            date_format(previous_month, 'YEAR_MONTH_FORMAT'))
    else:
        previous_content = '&nbsp;'
    if next_month:
        next_content = '<a href="%s" class="next-month">%s</a>' % (
            reverse('zinnia:entry_archive_month', args=[
                next_month.strftime('%Y'),
                next_month.strftime('%m')]),
            date_format(next_month, 'YEAR_MONTH_FORMAT'))
    else:
        next_content = '&nbsp;'
    return footer % (previous_content, next_content)
Return a footer for a previous and next month.
def second_order_score(y, mean, scale, shape, skewness):
    return ((y - mean) / float(scale * np.abs(y - mean))) / (
        -(np.power(y - mean, 2) - np.power(np.abs(mean - y), 2))
        / (scale * np.power(np.abs(mean - y), 3)))
GAS Laplace Update term potentially using second-order information - native Python function

Parameters
----------
y : float
    datapoint for the time series
mean : float
    location parameter for the Laplace distribution
scale : float
    scale parameter for the Laplace distribution
shape : float
    tail thickness parameter for the Laplace distribution
skewness : float
    skewness parameter for the Laplace distribution

Returns
----------
- Adjusted score of the Laplace family
def set_text(self, text):
    text = text.strip()
    new_text = self.text() + text
    self.setText(new_text)
Set the filter text.
async def start_worker(
    self,
    cmd: List[str],
    input_source: str,
    output: Optional[str] = None,
    extra_cmd: Optional[str] = None,
    pattern: Optional[str] = None,
    reading: str = FFMPEG_STDERR,
) -> None:
    if self.is_running:
        _LOGGER.warning("Can't start worker. It is already running!")
        return

    if reading == FFMPEG_STDERR:
        stdout = False
        stderr = True
    else:
        stdout = True
        stderr = False

    await self.open(
        cmd=cmd,
        input_source=input_source,
        output=output,
        extra_cmd=extra_cmd,
        stdout_pipe=stdout,
        stderr_pipe=stderr,
    )

    self._input = await self.get_reader(reading)
    self._read_task = self._loop.create_task(self._process_lines(pattern))
    self._loop.create_task(self._worker_process())
Start ffmpeg and process data from its output.
def id_to_root_name(id):
    name = root_names.get(id)
    if not name:
        name = repr(id)
    return name
Convert a PDG ID to a string with root markup.
def footer_length(header):
    footer_length = 0
    if header.algorithm.signing_algorithm_info is not None:
        footer_length += 2
        footer_length += header.algorithm.signature_len
    return footer_length
Calculates the ciphertext message footer length, given a complete header.

:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:rtype: int
def rotate_around(self, axis, theta):
    x, y, z = self.x, self.y, self.z
    u, v, w = axis.x, axis.y, axis.z
    r2 = u**2 + v**2 + w**2
    r = math.sqrt(r2)
    ct = math.cos(theta)
    st = math.sin(theta) / r
    dt = (u * x + v * y + w * z) * (1 - ct) / r2
    return Vector3((u * dt + x * ct + (-w * y + v * z) * st),
                   (v * dt + y * ct + (w * x - u * z) * st),
                   (w * dt + z * ct + (-v * x + u * y) * st))
Return the vector rotated around axis through angle theta. Right hand rule applies.
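A minimal standalone sanity check of the same Rodrigues-style formula (no Vector3 class needed): rotating (1, 0, 0) about the z axis by pi/2 should give roughly (0, 1, 0).

    import math

    x, y, z = 1.0, 0.0, 0.0   # vector to rotate
    u, v, w = 0.0, 0.0, 1.0   # rotation axis (z)
    theta = math.pi / 2

    r2 = u**2 + v**2 + w**2
    r = math.sqrt(r2)
    ct = math.cos(theta)
    st = math.sin(theta) / r
    dt = (u * x + v * y + w * z) * (1 - ct) / r2

    print((u * dt + x * ct + (-w * y + v * z) * st,
           v * dt + y * ct + (w * x - u * z) * st,
           w * dt + z * ct + (-v * x + u * y) * st))  # ~ (0.0, 1.0, 0.0)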
def __get_min_reads(current_provisioning, min_provisioned_reads, log_tag):
    reads = 1
    if min_provisioned_reads:
        reads = int(min_provisioned_reads)
        if reads > int(current_provisioning * 2):
            reads = int(current_provisioning * 2)
            logger.debug(
                '{0} - '
                'Cannot reach min-provisioned-reads as max scale up '
                'is 100% of current provisioning'.format(log_tag))

    logger.debug(
        '{0} - Setting min provisioned reads to {1}'.format(
            log_tag, min_provisioned_reads))

    return reads
Get the minimum number of reads to current_provisioning

:type current_provisioning: int
:param current_provisioning: Current provisioned reads
:type min_provisioned_reads: int
:param min_provisioned_reads: Configured min provisioned reads
:type log_tag: str
:param log_tag: Prefix for the log
:returns: int -- Minimum number of reads
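The capping logic in numbers, as a standalone sketch (name shortened and logging dropped for illustration): with a current provisioning of 100 reads and a configured minimum of 500, the 100% scale-up cap wins and the result is 200.

    def get_min_reads(current_provisioning, min_provisioned_reads):
        reads = 1
        if min_provisioned_reads:
            # requested minimum, capped at 2x current provisioning
            reads = min(int(min_provisioned_reads), int(current_provisioning * 2))
        return reads

    print(get_min_reads(100, 500))   # 200 -- capped at 2x current
    print(get_min_reads(100, 150))   # 150 -- requested minimum fits under the cap
    print(get_min_reads(100, None))  # 1   -- no configured minimum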
def main(*args):
    args = args or sys.argv[1:]
    params = PARSER.parse_args(args)

    from .log import setup_logging
    setup_logging(params.level.upper())

    from .core import Starter
    starter = Starter(params)
    if not starter.params.TEMPLATES or starter.params.list:
        setup_logging('WARN')
        for t in sorted(starter.iterate_templates()):
            logging.warn("%s -- %s", t.name, t.params.get(
                'description', 'no description'))
        return True

    try:
        starter.copy()
    except Exception as e:
        logging.error(e)
        sys.exit(1)
Entry point.
def create_index(self, indexname=None, index_conf=None):
    if indexname is None:
        indexname = self.index_name
    log.debug("Creating new index: '{0}'".format(indexname))
    if index_conf is None:
        index_conf = {'settings': self.settings,
                      'mappings': {'book': {'properties': self.properties}}}
    try:
        self.es.indices.create(index=indexname, body=index_conf)
    except TransportError as te:
        if te.error.startswith("IndexAlreadyExistsException"):
            raise Exception("Cannot create index '{}', already exists".format(indexname))
        else:
            raise
Create the index

Create the index with given configuration. If `indexname` is provided it
will be used as the new index name instead of the class one
(:py:attr:`DB.index_name`)

:param index_conf: configuration to be used in index creation. If this is
    not specified the default index configuration will be used.
:raises Exception: if the index already exists.
def get_changeset(self, **options):
    cid = options.get('changeset_id', None)
    return self.repo.get_changeset(cid)
Returns changeset for given ``options``.
def setCurrentIndex(self, index):
    super(XViewPanel, self).setCurrentIndex(index)
    self.tabBar().setCurrentIndex(index)
Sets the current index on self and on the tab bar to keep the two in sync.

:param index | <int>
def process_checkpoint(self, msg: Checkpoint, sender: str) -> bool:
    self.logger.info('{} processing checkpoint {} from {}'.format(self, msg, sender))
    result, reason = self.validator.validate_checkpoint_msg(msg)
    if result == DISCARD:
        self.discard(msg,
                     "{} discard message {} from {} "
                     "with the reason: {}".format(self, msg, sender, reason),
                     self.logger.trace)
    elif result == PROCESS:
        self._do_process_checkpoint(msg, sender)
    else:
        self.logger.debug("{} stashing checkpoint message {} with "
                          "the reason: {}".format(self, msg, reason))
        self.stasher.stash((msg, sender), result)
        return False
    return True
Process checkpoint messages

:return: whether processed (True) or stashed (False)
def get_crimes_no_location(self, force, date=None, category=None):
    if not isinstance(force, Force):
        force = Force(self, id=force)
    if isinstance(category, CrimeCategory):
        category = category.id
    kwargs = {
        'force': force.id,
        'category': category or 'all-crime',
    }
    crimes = []
    if date is not None:
        kwargs['date'] = date
    for c in self.service.request('GET', 'crimes-no-location', **kwargs):
        crimes.append(NoLocationCrime(self, data=c))
    return crimes
Get crimes with no location for a force.
Uses the crimes-no-location_ API call.

.. _crimes-no-location: https://data.police.uk/docs/method/crimes-no-location/

:rtype: list
:param force: The force to get no-location crimes for.
:type force: str or Force
:param date: The month in which the crimes were reported in the format
    ``YYYY-MM`` (the latest date is used if ``None``).
:type date: str or None
:param category: The category of the crimes to filter by (either by ID
    or CrimeCategory object)
:type category: str or CrimeCategory
:return: A ``list`` of :class:`crime.NoLocationCrime` objects which were
    reported in the given month, by the specified force, but which don't
    have a location.
def _clear_state(self, seed=None):
    self.start_time = time()
    self.run_stats = []
    self.best_index = -1
    self.best_score = -1
    self.best_config = None
    self.search_space = None
    if seed is not None:
        self.rng = random.Random(seed)
Clears the state and starts the clock
def match_events(ref, est, window, distance=None):
    if distance is not None:
        hits = np.where(distance(ref, est) <= window)
    else:
        hits = _fast_hit_windows(ref, est, window)

    G = {}
    for ref_i, est_i in zip(*hits):
        if est_i not in G:
            G[est_i] = []
        G[est_i].append(ref_i)

    matching = sorted(_bipartite_match(G).items())
    return matching
Compute a maximum matching between reference and estimated event times,
subject to a window constraint.

Given two lists of event times ``ref`` and ``est``, we seek the largest set
of correspondences ``(ref[i], est[j])`` such that
``distance(ref[i], est[j]) <= window``, and each ``ref[i]`` and ``est[j]``
is matched at most once.

This is useful for computing precision/recall metrics in beat tracking,
onset detection, and segmentation.

Parameters
----------
ref : np.ndarray, shape=(n,)
    Array of reference values
est : np.ndarray, shape=(m,)
    Array of estimated values
window : float > 0
    Size of the window.
distance : function
    function that computes the outer distance of ref and est.
    By default uses ``|ref[i] - est[j]|``

Returns
-------
matching : list of tuples
    A list of matched reference and event numbers.
    ``matching[i] == (i, j)`` where ``ref[i]`` matches ``est[j]``.
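This docstring style matches mir_eval's utility of the same name; assuming that is where it lives (mir_eval.util.match_events), usage looks roughly like this:

    import numpy as np
    import mir_eval  # assumption: this is mir_eval.util.match_events

    ref = np.array([0.0, 1.0, 2.0])
    est = np.array([0.1, 1.6, 1.9])
    # only 0.1 and 1.9 fall within 0.25 of a reference event
    print(mir_eval.util.match_events(ref, est, window=0.25))  # [(0, 0), (2, 2)]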
def pvector_field(item_type, optional=False, initial=()):
    return _sequence_field(CheckedPVector, item_type, optional, initial)
Create checked ``PVector`` field.

:param item_type: The required type for the items in the vector.
:param optional: If true, ``None`` can be used as a value for this field.
:param initial: Initial value to pass to factory if no value is given
    for the field.

:return: A ``field`` containing a ``CheckedPVector`` of the given type.
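A usage sketch, assuming this is the helper exported by pyrsistent for use inside a PRecord:

    from pyrsistent import PRecord, pvector_field

    class Scores(PRecord):
        values = pvector_field(int)

    s = Scores(values=[1, 2, 3])              # the list is coerced to a CheckedPVector
    s2 = s.set('values', s.values.append(4))  # persistent update; s is unchanged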
def reprocess_tree_node(self, tree_node, tx_context=None):
    if not tx_context:
        tx_context = collections.defaultdict(dict)

    if tree_node.parent is None:
        return tx_context
    if tree_node.timeperiod in tx_context[tree_node.process_name]:
        return tx_context

    if not tree_node.job_record.is_embryo:
        state_machine_name = context.process_context[tree_node.process_name].state_machine_name
        state_machine = self.state_machines[state_machine_name]
        state_machine.reprocess_job(tree_node.job_record)
    tx_context[tree_node.process_name][tree_node.timeperiod] = tree_node

    self.reprocess_tree_node(tree_node.parent, tx_context)

    dependant_nodes = self._find_dependant_tree_nodes(tree_node)
    for node in dependant_nodes:
        self.reprocess_tree_node(node, tx_context)

    return tx_context
Method reprocesses the node and all its dependant and parent nodes
def user_list_membership(self, username, member_type="USER",
                         recursive=True, max_return_count=999):
    return self.client.service.getUserListMembership(
        username, member_type, recursive, max_return_count, self.proxy_id
    )
Get info for lists a user is a member of.

This is similar to :meth:`user_lists` but with a few differences:

#. It returns list info objects instead of list names.
#. It has an option to fully resolve a user's list hierarchy. That is,
   if a user is a member of a nested list, this method can retrieve both
   the nested list and the parent lists that contain the nested list.

Args:
    username (str): The MIT username of the user
    member_type(str): The type of user, "USER" or "STRING"
    recursive(bool): Whether to fully resolve the list hierarchy
    max_return_count(int): limit the number of items returned

Returns:
    list of dicts: info dicts, one per list.
def remove_rules(self, description):
    rm = []
    description = description.lower()

    for i in range(0, len(self.extract_rules)):
        if self.extract_rules[i]['regex'].search(description):
            rm.append(i)

    # Pop from the highest index first so earlier removals do not shift
    # the positions of the rules still waiting to be removed.
    for i in reversed(rm):
        self.extract_rules.pop(i)

    return len(rm)
Remove all rules that match a specified description.

@description - The description to match against.

Returns the number of rules removed.
def get_tags(self):
    res = self.get_request('/tag')
    return [Tag(cloud_manager=self, **tag) for tag in res['tags']['tag']]
List all tags as Tag objects.
def readTempC(self):
    t = self._device.readU16BE(MCP9808_REG_AMBIENT_TEMP)
    self._logger.debug('Raw ambient temp register value: 0x{0:04X}'.format(t & 0xFFFF))
    temp = (t & 0x0FFF) / 16.0
    if t & 0x1000:
        temp -= 256.0
    return temp
Read sensor and return its value in degrees celsius.
def _RecurseKey(self, recur_item, root='', depth=15):
    if depth < 1:
        logger.debug('Recursion limit hit for key: {0:s}'.format(root))
        return

    if isinstance(recur_item, (list, tuple)):
        for recur in recur_item:
            for key in self._RecurseKey(recur, root, depth):
                yield key
        return

    if not hasattr(recur_item, 'iteritems'):
        return

    for key, value in iter(recur_item.items()):
        yield root, key, value
        if isinstance(value, dict):
            value = [value]
        if isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    for keyval in self._RecurseKey(
                            item, root=root + '/' + key, depth=depth - 1):
                        yield keyval
Flattens nested dictionaries and lists by yielding their values.

The hierarchy of a bencode file is a series of nested dictionaries and
lists. This helper function helps plugins navigate the structure without
having to reimplement their own recursive methods.

This method implements an overridable depth limit to prevent processing
extremely deeply nested dictionaries. If the limit is reached a debug
message is logged indicating which key processing stopped on.

Args:
    recur_item (object): object to be checked for additional nested items.
    root (str): the pathname of the current working key.
    depth (int): a counter to ensure we stop at the maximum recursion depth.

Yields:
    tuple: containing:
        str: root
        str: key
        str: value
def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    try:
        issue_num = int(text)
        if issue_num <= 0:
            raise ValueError
    except ValueError:
        msg = inliner.reporter.error(
            'GitHub issue number must be a number greater than or equal to 1; '
            '"%s" is invalid.' % text, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]

    app = inliner.document.settings.env.app
    if 'pull' in name.lower():
        category = 'pull'
    elif 'issue' in name.lower():
        category = 'issues'
    else:
        msg = inliner.reporter.error(
            'GitHub roles include "ghpull" and "ghissue", '
            '"%s" is invalid.' % name, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]

    node = make_link_node(rawtext, app, category, str(issue_num), options)
    return [node], []
Link to a GitHub issue.

Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be empty.

:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
def reverse_tree(tree):
    rtree = defaultdict(list)
    child_keys = set(c.key for c in flatten(tree.values()))
    for k, vs in tree.items():
        for v in vs:
            node = find_tree_root(rtree, v.key) or v
            rtree[node].append(k.as_required_by(v))
        if k.key not in child_keys:
            rtree[k.as_requirement()] = []
    return rtree
Reverse the dependency tree.

ie. the keys of the resulting dict are objects of type ReqPackage and the
values are lists of DistPackage objects.

:param dict tree: the pkg dependency tree obtained by calling
    `construct_tree` function
:returns: reversed tree
:rtype: dict
def remove_temp_copy(self):
    if self.is_temp and self.root_dir is not None:
        shutil.rmtree(self.root_dir)
        self.root_dir = None
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
def parse_cluster(self, global_params, region, cluster):
    cluster_name = cluster.pop('CacheClusterId')
    cluster['name'] = cluster_name

    if 'CacheSubnetGroupName' in cluster:
        subnet_group = api_clients[region].describe_cache_subnet_groups(
            CacheSubnetGroupName=cluster['CacheSubnetGroupName'])['CacheSubnetGroups'][0]
        vpc_id = subnet_group['VpcId']
    else:
        vpc_id = ec2_classic
        subnet_group = None

    manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
    self.vpcs[vpc_id].clusters[cluster_name] = cluster
    if subnet_group:
        self.vpcs[vpc_id].subnet_groups[subnet_group['CacheSubnetGroupName']] = subnet_group
Parse a single ElastiCache cluster

:param global_params: Parameters shared for all regions
:param region: Name of the AWS region
:param cluster: ElastiCache cluster
def wrap(self, width):
    res = []
    prev_state = set()
    part = []
    cwidth = 0
    for char, _width, state in zip(self._string, self._width, self._state):
        if cwidth + _width > width:
            if prev_state:
                part.append(self.ANSI_RESET)
            res.append("".join(part))
            prev_state = set()
            part = []
            cwidth = 0
        cwidth += _width
        if prev_state == state:
            pass
        elif prev_state <= state:
            part.extend(state - prev_state)
        else:
            part.append(self.ANSI_RESET)
            part.extend(state)
        prev_state = state
        part.append(char)
    if prev_state:
        part.append(self.ANSI_RESET)
    if part:
        res.append("".join(part))
    return res
Returns a partition of the string based on `width`
def unkown_field(self, value=None):
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str '
                             'for field `unkown_field`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `unkown_field`')
    self._unkown_field = value
Corresponds to IDD Field `unkown_field`
Empty field in data.

Args:
    value (str): value for IDD Field `unkown_field`
        if `value` is None it will not be checked against the
        specification and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
def parse_request_body(self, body):
    PARSING_FUNCTIONS = {
        'application/json': json.loads,
        'text/json': json.loads,
        'application/x-www-form-urlencoded': self.parse_querystring,
    }
    content_type = self.headers.get('content-type', '')
    do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION)
    try:
        body = decode_utf8(body)
        return do_parse(body)
    except (Exception, BaseException):
        return body
Attempt to parse the post based on the content-type passed.
Return the regular body if not.

:param body: string
:returns: a python object such as dict or list in case the
    deserialization succeeded. Else returns the given param ``body``.
def handle_current_state(self):
    if getattr(self, '_current_state_hydrated_changed', False) and self.save_on_change:
        new_base_state = json.dumps(getattr(self, '_current_state_hydrated', {}))
        if new_base_state != self.base_state:
            self.base_state = new_base_state
            self.save()
Check to see if the current hydrated state and the saved state are different. If they are, then persist the current state in the database by saving the model instance.
def create_superuser(self, username, email, short_name, full_name,
                     institute, password, **extra_fields):
    return self._create_user(
        username=username, email=email, institute=institute,
        password=password, short_name=short_name, full_name=full_name,
        is_admin=True, **extra_fields)
Creates a new person with super powers.
def python_value(self, value):
    value = super(JSONField, self).python_value(value)
    if value is not None:
        return flask.json.loads(value, **self._load_kwargs)
Return the JSON in the database as a ``dict``.

Returns:
    dict: The field run through json.loads
def on_replace_scene(self, event: events.ReplaceScene, signal):
    self.stop_scene()
    self.start_scene(event.new_scene, event.kwargs)
Replace the running scene with a new one.
def set_experiment_winner(experiment):
    redis = _get_redis_connection()
    experiment = Experiment.find(redis, experiment)
    if experiment:
        alternative_name = request.form.get('alternative')
        alternative = Alternative(redis, alternative_name, experiment.name)
        if alternative.name in experiment.alternative_names:
            experiment.winner = alternative.name
    return redirect(url_for('.index'))
Mark an alternative as the winner of the experiment.
def append(self, cls, infer_hidden: bool = False, **kwargs) -> Encoder:
    params = dict(kwargs)
    if infer_hidden:
        params['num_hidden'] = self.get_num_hidden()

    sig_params = inspect.signature(cls.__init__).parameters
    if 'dtype' in sig_params and 'dtype' not in kwargs:
        params['dtype'] = self.dtype
    encoder = cls(**params)
    self.encoders.append(encoder)
    return encoder
Extends sequence with new Encoder. 'dtype' gets passed into Encoder
instance if not present in parameters and supported by specific
Encoder type.

:param cls: Encoder type.
:param infer_hidden: If number of hidden should be inferred from
    previous encoder.
:param kwargs: Named arbitrary parameters for Encoder.
:return: Instance of Encoder.
def _delete_network(self, request, network):
    try:
        api.neutron.network_delete(request, network.id)
        LOG.debug('Delete the created network %s '
                  'due to subnet creation failure.', network.id)
        msg = _('Delete the created network "%s" '
                'due to subnet creation failure.') % network.name
        redirect = self.get_failure_url()
        messages.info(request, msg)
        raise exceptions.Http302(redirect)
    except Exception as e:
        LOG.info('Failed to delete network %(id)s: %(exc)s',
                 {'id': network.id, 'exc': e})
        msg = _('Failed to delete network "%s"') % network.name
        redirect = self.get_failure_url()
        exceptions.handle(request, msg, redirect=redirect)
Delete the created network when subnet creation failed.
def FromMilliseconds(self, millis):
    self._NormalizeDuration(
        millis // _MILLIS_PER_SECOND,
        (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND)
Converts milliseconds to Duration.
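The split in numbers: 1500 ms becomes 1 second plus 500,000,000 nanoseconds (1500 // 1000 = 1; 1500 % 1000 = 500 ms, times 1,000,000 ns/ms). Assuming this is the protobuf well-known-type helper, usage looks like:

    from google.protobuf.duration_pb2 import Duration

    d = Duration()
    d.FromMilliseconds(1500)
    print(d.seconds, d.nanos)  # 1 500000000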
def ready(zone):
    ret = {'status': True}

    res = __salt__['cmd.run_all']('zoneadm {zone} ready'.format(
        zone='-u {0}'.format(zone) if _is_uuid(zone) else '-z {0}'.format(zone),
    ))
    ret['status'] = res['retcode'] == 0
    ret['message'] = res['stdout'] if ret['status'] else res['stderr']
    ret['message'] = ret['message'].replace('zoneadm: ', '')
    if ret['message'] == '':
        del ret['message']

    return ret
Prepares a zone for running applications.

zone : string
    name or uuid of the zone

CLI Example:

.. code-block:: bash

    salt '*' zoneadm.ready clementine
def gauss_box_model(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5):
    z = (x - mean) / stddev
    z2 = z + hpix / stddev
    z1 = z - hpix / stddev
    return amplitude * (norm.cdf(z2) - norm.cdf(z1))
Integrate a Gaussian profile.
def _get_rating(self, entry):
    r_info = ''
    for string in entry[2].strings:
        r_info += string
    rating, share = r_info.split('/')
    return (rating, share.strip('*'))
Get the rating and share for a specific row
def groups(self, user, include=None):
    return self._query_zendesk(self.endpoint.groups, 'group', id=user, include=include)
Retrieve the groups for this user.

:param include: list of objects to sideload.
    `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
:param user: User object or id
def replace_text_dir(self, directory, to_replace, replacement, file_type=None):
    if not file_type:
        file_type = "*.tex"
    for file in glob.iglob(os.path.join(directory, file_type)):
        self.replace_text(file, to_replace, replacement)
Replaces a string with its replacement in all the files in the directory

:param directory: the directory in which the files have to be modified
:param to_replace: the string to be replaced in the files
:param replacement: the string which replaces 'to_replace' in the files
:param file_type: file pattern to match the files in which the string
    has to be replaced
def raise_error_if_unphysical(f):
    def wrapper(self, *args, **kwargs):
        if self.k_vrh < 0 or self.g_vrh < 0:
            raise ValueError("Bulk or shear modulus is negative, "
                             "property cannot be determined")
        return f(self, *args, **kwargs)
    return wrapper
Wrapper for functions or properties that should raise an error if tensor is unphysical.
def initialize_private_canvas(self, private_canvas):
    if self.t_.get('show_pan_position', False):
        self.show_pan_mark(True)
    if self.t_.get('show_focus_indicator', False):
        self.show_focus_indicator(True)
Initialize the private canvas used by this instance.
def stop(self):
    if not self.running:
        return
    self.logger.debug("stopping main task of %r", self, stack_info=True)
    self._main_task.cancel()
Stop the client.

This sends a signal to the clients main task which makes it terminate.

It may take some cycles through the event loop to stop the client task.
To check whether the task has actually stopped, query :attr:`running`.
def _adjust_nstep(n_step, gamma, obs, actions, rewards, new_obs, dones):
    assert not any(dones[:-1]), "Unexpected done in middle of trajectory"

    traj_length = len(rewards)
    for i in range(traj_length):
        for j in range(1, n_step):
            if i + j < traj_length:
                new_obs[i] = new_obs[i + j]
                dones[i] = dones[i + j]
                rewards[i] += gamma**j * rewards[i + j]
Rewrites the given trajectory fragments to encode n-step rewards.

reward[i] = (
    reward[i] * gamma**0 +
    reward[i+1] * gamma**1 + ... +
    reward[i+n_step-1] * gamma**(n_step-1))

The ith new_obs is also adjusted to point to the (i+n_step-1)'th new obs.

At the end of the trajectory, n is truncated to fit in the traj length.
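A standalone replay of the rewrite on a 3-step fragment with n_step=2 and gamma=0.9; each reward picks up the discounted next reward before that next reward is itself rewritten, and new_obs/dones shift forward:

    n_step, gamma = 2, 0.9
    rewards = [1.0, 1.0, 1.0]
    dones = [False, False, True]
    new_obs = ['o1', 'o2', 'o3']

    # same inner loop as _adjust_nstep above
    for i in range(len(rewards)):
        for j in range(1, n_step):
            if i + j < len(rewards):
                new_obs[i] = new_obs[i + j]
                dones[i] = dones[i + j]
                rewards[i] += gamma**j * rewards[i + j]

    print(rewards)  # [1.9, 1.9, 1.0]
    print(new_obs)  # ['o2', 'o3', 'o3']
    print(dones)    # [False, True, True]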
def _cmp_bystrlen_reverse(a, b):
    if len(a) > len(b):
        return -1
    elif len(a) < len(b):
        return 1
    else:
        return 0
A private "cmp" function to be used by the "sort" function of a list
when ordering the titles found in a knowledge base by string-length -
LONGEST -> SHORTEST.

@param a: (string)
@param b: (string)
@return: (integer) - 0 if len(a) == len(b); 1 if len(a) < len(b);
    -1 if len(a) > len(b);
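In Python 3, list.sort() no longer accepts a cmp argument, so a comparator like this would go through functools.cmp_to_key; a self-contained sketch (comparator repeated so the snippet runs on its own):

    import functools

    def _cmp_bystrlen_reverse(a, b):
        if len(a) > len(b):
            return -1
        elif len(a) < len(b):
            return 1
        return 0

    titles = ['a', 'abcd', 'ab']
    titles.sort(key=functools.cmp_to_key(_cmp_bystrlen_reverse))
    print(titles)  # ['abcd', 'ab', 'a']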
def decode(self, data):
    return Zchunk(lib.zarmour_decode(self._as_parameter_, data), True)
Decode an armoured string into a chunk. The decoded output is null-terminated, so it may be treated as a string, if that's what it was prior to encoding.
def duration(self):
    if not self.started:
        return None
    start = self.started
    end = self.completed
    if not end:
        end = datetime.utcnow()
    return end - start
Return a timedelta for this task.

Measure the time between this task's start and end time, or "now"
if the task has not yet finished.

:returns: timedelta object, or None if the task has not even started.
def check_dynamic_route_exists(pattern, routes_to_check, parameters):
    for ndx, route in enumerate(routes_to_check):
        if route.pattern == pattern and route.parameters == parameters:
            return ndx, route
    else:
        return -1, None
Check if a URL pattern exists in a list of routes provided based on
the comparison of URL pattern and the parameters.

:param pattern: URL parameter pattern
:param routes_to_check: list of dynamic routes either hashable or
    unhashable routes.
:param parameters: List of :class:`Parameter` items
:return: Tuple of index and route if matching route exists else
    -1 for index and None for route
def add_hostname_cn(self):
    ip = unit_get('private-address')
    addresses = [ip]
    vip = get_vip_in_network(resolve_network_cidr(ip))
    if vip:
        addresses.append(vip)
    self.hostname_entry = {
        'cn': get_hostname(ip),
        'addresses': addresses}
Add a request for the hostname of the machine
def generator_to_list(fn):
    def wrapper(*args, **kw):
        return list(fn(*args, **kw))
    return wrapper
This decorator is for the flat_list function. It converts a returned generator to a list.
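Usage sketch with a throwaway generator (the decorator definition from the row above is repeated so the snippet runs on its own):

    def generator_to_list(fn):
        def wrapper(*args, **kw):
            return list(fn(*args, **kw))
        return wrapper

    @generator_to_list
    def evens(n):
        for i in range(n):
            if i % 2 == 0:
                yield i

    print(evens(10))  # [0, 2, 4, 6, 8] -- a list, not a generator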
def update_voice_model(self, customization_id, name=None, description=None,
                       words=None, **kwargs):
    if customization_id is None:
        raise ValueError('customization_id must be provided')
    if words is not None:
        words = [self._convert_model(x, Word) for x in words]

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('text_to_speech', 'V1', 'update_voice_model')
    headers.update(sdk_headers)

    data = {'name': name, 'description': description, 'words': words}

    url = '/v1/customizations/{0}'.format(
        *self._encode_path_vars(customization_id))
    response = self.request(
        method='POST', url=url, headers=headers, json=data, accept_json=True)
    return response
Update a custom model.

Updates information for the specified custom voice model. You can update
metadata such as the name and description of the voice model. You can also
update the words in the model and their translations. Adding a new
translation for a word that already exists in a custom model overwrites the
word's existing translation. A custom model can contain no more than 20,000
entries. You must use credentials for the instance of the service that owns
a model to update it.

You can define sounds-like or phonetic translations for words. A
sounds-like translation consists of one or more words that, when combined,
sound like the word. Phonetic translations are based on the SSML phoneme
format for representing a word. You can specify them in standard
International Phonetic Alphabet (IPA) representation
<code>&lt;phoneme alphabet=\"ipa\" ph=\"t&#601;m&#712;&#593;to\"&gt;&lt;/phoneme&gt;</code>
or in the proprietary IBM Symbolic Phonetic Representation (SPR)
<code>&lt;phoneme alphabet=\"ibm\" ph=\"1gAstroEntxrYFXs\"&gt;&lt;/phoneme&gt;</code>

**Note:** This method is currently a beta release.

**See also:**
* [Updating a custom model](https://cloud.ibm.com/docs/services/text-to-speech/custom-models.html#cuModelsUpdate)
* [Adding words to a Japanese custom model](https://cloud.ibm.com/docs/services/text-to-speech/custom-entries.html#cuJapaneseAdd)
* [Understanding customization](https://cloud.ibm.com/docs/services/text-to-speech/custom-intro.html).

:param str customization_id: The customization ID (GUID) of the custom
    voice model. You must make the request with service credentials created
    for the instance of the service that owns the custom model.
:param str name: A new name for the custom voice model.
:param str description: A new description for the custom voice model.
:param list[Word] words: An array of `Word` objects that provides the words
    and their translations that are to be added or updated for the custom
    voice model. Pass an empty array to make no additions or updates.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
def _sideral(date, longitude=0., model='mean', eop_correction=True, terms=106):
    t = date.change_scale('UT1').julian_century
    theta = 67310.54841 + (876600 * 3600 + 8640184.812866) * t \
        + 0.093104 * t ** 2 - 6.2e-6 * t ** 3
    theta /= 240.

    if model == 'apparent':
        theta += equinox(date, eop_correction, terms)

    theta += longitude
    theta %= 360.

    return theta
Get the sideral time at a defined date

Args:
    date (Date):
    longitude (float): Longitude of the observer (in degrees)
        East positive/West negative.
    model (str): 'mean' or 'apparent' for GMST and GAST respectively

Return:
    float: Sideral time in degrees

GMST: Greenwich Mean Sideral Time
LST: Local Sideral Time (Mean)
GAST: Greenwich Apparent Sideral Time
def _parse_image_multilogs_string(config, ret, repo):
    image_logs, infos = [], None
    if ret and ret.strip().startswith('{') and ret.strip().endswith('}'):
        pushd = 0
        buf = ''
        for char in ret:
            buf += char
            if char == '{':
                pushd += 1
            if char == '}':
                pushd -= 1
            if pushd == 0:
                try:
                    buf = json.loads(buf)
                except Exception:
                    pass
                image_logs.append(buf)
                buf = ''
        image_logs.reverse()
        for l in image_logs:
            if isinstance(l, dict):
                if l.get('status') == 'Download complete' and l.get('id'):
                    infos = _get_image_infos(config, repo)
                    break
    return image_logs, infos
Parse image log strings into grokable data
def shift_up(self, times=1):
    try:
        return Location(self._rank + times, self._file)
    except IndexError as e:
        raise IndexError(e)
Finds Location shifted up by 1

:rtype: Location
def create_stemmer(self, isDev=False):
    words = self.get_words(isDev)
    dictionary = ArrayDictionary(words)
    stemmer = Stemmer(dictionary)
    resultCache = ArrayCache()
    cachedStemmer = CachedStemmer(resultCache, stemmer)
    return cachedStemmer
Returns Stemmer instance
def setup_tree(ctx, verbose=None, root=None, tree_dir=None, modules_dir=None):
    print('Setting up the tree')
    ctx.run('python bin/setup_tree.py -t {0} -r {1} -m {2}'.format(
        tree_dir, root, modules_dir))
Sets up the SDSS tree environment
def pipeline_id_from_name(name, region=None, key=None, keyid=None, profile=None):
    r = {}
    result_pipelines = list_pipelines()
    if 'error' in result_pipelines:
        return result_pipelines

    for pipeline in result_pipelines['result']:
        if pipeline['name'] == name:
            r['result'] = pipeline['id']
            return r

    r['error'] = 'No pipeline found with name={0}'.format(name)
    return r
Get the pipeline id, if it exists, for the given name.

CLI example:

.. code-block:: bash

    salt myminion boto_datapipeline.pipeline_id_from_name my_pipeline_name
def stitchModules(module, fallbackModule):
    for name, attr in fallbackModule.__dict__.items():
        if name not in module.__dict__:
            module.__dict__[name] = attr
complete missing attributes with those in fallbackModule

imagine you have 2 modules: a and b
a is some kind of an individualised module of b - but will maybe not
contain all attributes of b. in this case a should use the attributes from b

>>> a.var1 = 'individual 1'
# what we now want is to add all missing attributes from b to a:
>>> stitchModules(a, b)
>>> print a.var1
individual 1
>>> print a.var2
standard 2
def _print_download_progress_msg(self, msg, flush=False):
    if self._interactive_mode():
        self._max_prog_str = max(self._max_prog_str, len(msg))
        sys.stdout.write("\r%-{}s".format(self._max_prog_str) % msg)
        sys.stdout.flush()
        if flush:
            print("\n")
    else:
        logging.info(msg)
Prints a message about download progress either to the console or TF log.

Args:
    msg: Message to print.
    flush: Indicates whether to flush the output (only used in
        interactive mode).
def template_filter(self, param=None):
    def deco(func):
        name = param or func.__name__
        self.filters[name] = func
        return func
    return deco
Returns a decorator that adds the wrapped function to the dictionary of
template filters.

The wrapped function is keyed by either the supplied param (if supplied)
or by the wrapped function's name.

:param param: Optional name to use instead of the name of the function
    to be wrapped
:return: A decorator to wrap a template filter function
:rtype: callable
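A self-contained sketch of the registration pattern; the Filters host class here is invented for illustration, and only its filters dict and method mirror the code above:

    class Filters:  # hypothetical host class, for illustration only
        def __init__(self):
            self.filters = {}

        def template_filter(self, param=None):
            def deco(func):
                self.filters[param or func.__name__] = func
                return func
            return deco

    env = Filters()

    @env.template_filter()          # keyed by the function's own name
    def shout(s):
        return s.upper()

    @env.template_filter('excite')  # keyed by the explicit param
    def add_bang(s):
        return s + '!'

    print(sorted(env.filters))  # ['excite', 'shout']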
def update(self):
    if not self.calendar_id:
        return False

    url = self.build_url(self._endpoints.get('calendar'))

    data = {
        self._cc('name'): self.name,
        self._cc('color'): self._cc(self.color.value
                                    if isinstance(self.color, CalendarColor)
                                    else self.color)
    }

    response = self.con.patch(url, data=data)

    return bool(response)
Updates this calendar. Only name and color can be changed.

:return: Success / Failure
:rtype: bool
def as_child(cls, global_config, parent=None):
    try:
        setproctitle('rejester worker')
        random.seed()
        yakonfig.set_default_config([yakonfig, dblogger, rejester],
                                    config=global_config)
        worker = cls(yakonfig.get_global_config(rejester.config_name))
        worker.register(parent=parent)
        did_work = worker.run(set_title=True)
        worker.unregister()
        if did_work:
            sys.exit(cls.EXIT_SUCCESS)
        else:
            sys.exit(cls.EXIT_BORED)
    except Exception as e:
        if len(logging.root.handlers) > 0:
            logger.critical('failed to do any work', exc_info=e)
        sys.exit(cls.EXIT_EXCEPTION)
Run a single job in a child process. This method never returns; it always calls :func:`sys.exit` with an error code that says what it did.
def getDisplayName(self):
    if self.alias == "":
        return self.name
    return self.name + " as " + self.alias
Provides a name for display purposes, respecting the alias
def boost_error_level(version, error, segments, eci, is_sa=False):
    if error not in (consts.ERROR_LEVEL_H, None) and len(segments) == 1:
        levels = [consts.ERROR_LEVEL_L, consts.ERROR_LEVEL_M,
                  consts.ERROR_LEVEL_Q, consts.ERROR_LEVEL_H]
        if version < 1:
            levels.pop()
            if version < consts.VERSION_M4:
                levels.pop()
        data_length = segments.bit_length_with_overhead(version, eci, is_sa=is_sa)
        for level in levels[levels.index(error) + 1:]:
            try:
                found = consts.SYMBOL_CAPACITY[version][level] >= data_length
            except KeyError:
                break
            if found:
                error = level
    return error
Increases the error level if possible.

:param int version: Version constant.
:param int|None error: Error level constant or ``None``
:param Segments segments: Instance of :py:class:`Segments`
:param bool eci: Indicates if ECI designator should be written.
:param bool is_sa: Indicates if Structured Append mode is used.
def get_url(self, datatype, verb, urltype, params={}, api_host=None, api_version=None):
    api_version = api_version or 'v1'
    api_host = api_host or self.host

    subst = params.copy()
    subst['api_host'] = api_host
    subst['api_version'] = api_version

    url = "https://{api_host}/services/api/{api_version}"
    url += self.get_url_path(datatype, verb, urltype, params, api_version)
    return url.format(**subst)
Returns a fully formed url

:param datatype: a string identifying the data the url will access.
:param verb: the HTTP verb needed for use with the url.
:param urltype: an adjective used to describe the nature of the request.
:param \*\*params: substitution variables for the URL.
:return: string
:rtype: A fully formed url.
def set(self, key: bytes, value: bytes) -> Tuple[Hash32]:
    validate_is_bytes(key)
    validate_length(key, self._key_size)
    validate_is_bytes(value)

    path = to_int(key)
    node = value
    _, branch = self._get(key)
    proof_update = []

    target_bit = 1
    for sibling_node in reversed(branch):
        node_hash = keccak(node)
        proof_update.append(node_hash)
        self.db[node_hash] = node
        if (path & target_bit):
            node = sibling_node + node_hash
        else:
            node = node_hash + sibling_node
        target_bit <<= 1

    self.root_hash = keccak(node)
    self.db[self.root_hash] = node
    return tuple(reversed(proof_update))
Returns all updated hashes in root->leaf order
def jira(test_key):
    def decorator(test_item):
        def modified_test(*args, **kwargs):
            save_jira_conf()
            try:
                test_item(*args, **kwargs)
            except Exception as e:
                error_message = get_error_message_from_exception(e)
                test_comment = "The test '{}' has failed: {}".format(
                    args[0].get_method_name(), error_message)
                add_jira_status(test_key, 'Fail', test_comment)
                raise
            add_jira_status(test_key, 'Pass', None)

        modified_test.__name__ = test_item.__name__
        return modified_test
    return decorator
Decorator to update test status in Jira

:param test_key: test case key in Jira
:returns: jira test
def destroy(self):
    if not hasattr(self, 'server') or not self.server:
        raise Exception()
    return self.server.cloud_manager.delete_firewall_rule(
        self.server.uuid,
        self.position
    )
Remove this FirewallRule from the API. This instance must be associated with a server for this method to work, which is done by instantiating via server.get_firewall_rules().
def _attempt_to_raise_license_error(data_dir):
    if isinstance(data_dir, bytes):
        data_dir = _decode(data_dir)
    data_dir = os.path.join(data_dir, 'Data')
    current_date = dt.date.today().strftime('%Y%m%d')
    timestamp = dt.datetime.today().strftime('[%Y-%m-%d %H:%M:%S]')
    data_files = os.listdir(data_dir)
    for f in data_files:
        if f == (current_date + '.err'):
            file_name = os.path.join(data_dir, f)
            with fopen(file_name) as error_file:
                for line in error_file:
                    if not line.startswith(timestamp):
                        continue
                    if 'Not valid license' in line:
                        raise LicenseError('Your license appears to have '
                                           'expired. Try running "pynlpir '
                                           'update".')
                    elif 'Can not open License file' in line:
                        raise LicenseError('Your license appears to be '
                                           'missing. Try running "pynlpir '
                                           'update".')
Raise an error if NLPIR has detected a missing or expired license.

:param str data_dir: The directory containing NLPIR's `Data` directory.
:raises LicenseError: The NLPIR license appears to be missing or expired.
def get_smart_invite(self, smart_invite_id, recipient_email):
    params = {
        'smart_invite_id': smart_invite_id,
        'recipient_email': recipient_email
    }
    return self.request_handler.get('smart_invites', params=params, use_api_key=True).json()
Gets the details for a smart invite.

:param string smart_invite_id: - A String uniquely identifying the event
    for your application (note: this is NOT an ID generated by Cronofy).
:param string recipient_email: - The email address for the recipient to
    get details for.
def circles_pycairo(width, height, color):
    cairo_color = color / rgb(255, 255, 255)

    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    ctx = cairo.Context(surface)
    ctx.new_path()
    ctx.set_source_rgb(cairo_color.red, cairo_color.green, cairo_color.blue)
    ctx.arc(width / 2, height / 2, width / 2, 0, 2 * pi)
    ctx.fill()
    surface.write_to_png('circles.png')
Implementation of circle border with PyCairo.
def _append_object_entry(obj, list_name, entry):
    obj_list = getattr(obj, list_name, None)
    if obj_list is None:
        obj_list = []
        setattr(obj, list_name, obj_list)

    assert isinstance(obj_list, list)
    if entry not in obj_list:
        obj_list.append(entry)
Appends the given entry in the given object list.
Creates the list field if needed.

:param obj: The object that contains the list
:param list_name: The name of the list member in *obj*
:param entry: The entry to be added to the list
:raise ValueError: Invalid attribute content
def search(
    self,
    q="yellow flower",
    lang="en",
    video_type="all",
    category="",
    min_width=0,
    min_height=0,
    editors_choice="false",
    safesearch="false",
    order="popular",
    page=1,
    per_page=20,
    callback="",
    pretty="false",
):
    payload = {
        "key": self.api_key,
        "q": q,
        "lang": lang,
        "video_type": video_type,
        "category": category,
        "min_width": min_width,
        "min_height": min_height,
        "editors_choice": editors_choice,
        "safesearch": safesearch,
        "order": order,
        "page": page,
        "per_page": per_page,
        "callback": callback,
        "pretty": pretty,
    }

    resp = get(self.root_url + "videos/", params=payload)
    if resp.status_code == 200:
        return resp.json()
    else:
        raise ValueError(resp.text)
returns videos API data in dict

Videos search

:param q :type str :desc A URL encoded search term. If omitted, all
    images are returned. This value may not exceed 100 characters.
    Example: "yellow+flower"  Default: "yellow+flower"

:param lang :type str :desc Language code of the language to be searched in.
    Accepted values: cs, da, de, en, es, fr, id, it, hu, nl, no, pl, pt,
    ro, sk, fi, sv, tr, vi, th, bg, ru, el, ja, ko, zh
    Default: "en"
    For more info, see https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes

:param video_type :type str :desc Filter results by video type.
    Accepted values: "all", "film", "animation"  Default: "all"

:param category :type str :desc Filter results by category.
    Accepted values: fashion, nature, backgrounds, science, education,
    people, feelings, religion, health, places, animals, industry, food,
    computer, sports, transportation, travel, buildings, business, music

:param min_width :type int :desc Minimum image width. Default: 0

:param min_height :type int :desc Minimum image height. Default: 0

:param editors_choice :type bool (python-pixabay uses "true" and "false"
    strings instead) :desc Select images that have received an Editor's
    Choice award. Accepted values: "true", "false"  Default: "false"

:param safesearch :type bool (python-pixabay uses "true" and "false"
    strings instead) :desc A flag indicating that only images suitable
    for all ages should be returned. Accepted values: "true", "false"
    Default: "false"

:param order :type str :desc How the results should be ordered.
    Accepted values: "popular", "latest"  Default: "popular"

:param page :type int :desc Returned search results are paginated.
    Use this parameter to select the page number. Default: 1

:param per_page :type int :desc Determine the number of results per page.
    Accepted values: 3 - 200  Default: 20

:param callback :type str :desc JSONP callback function name

:param pretty :type bool (python-pixabay uses "true" and "false" strings
    instead) :desc Indent JSON output. This option should not be used in
    production. Accepted values: "true", "false"  Default: "false"

Code Example

>>> from pixabay import Video
>>>
>>> video = Video("api_key")
>>> video.search(q="apple", page=1)
def download_spt_forecast(self, extract_directory):
    needed_vars = (self.spt_watershed_name,
                   self.spt_subbasin_name,
                   self.spt_forecast_date_string,
                   self.ckan_engine_url,
                   self.ckan_api_key,
                   self.ckan_owner_organization)

    if None not in needed_vars:
        er_manager = ECMWFRAPIDDatasetManager(self.ckan_engine_url,
                                              self.ckan_api_key,
                                              self.ckan_owner_organization)
        er_manager.download_prediction_dataset(
            watershed=self.spt_watershed_name,
            subbasin=self.spt_subbasin_name,
            date_string=self.spt_forecast_date_string,
            extract_directory=extract_directory)

        return glob(os.path.join(extract_directory,
                                 self.spt_forecast_date_string,
                                 "Qout*52.nc"))[0]

    elif needed_vars.count(None) == len(needed_vars):
        log.info("Skipping streamflow forecast download ...")
        return None
    else:
        raise ValueError("To download the forecasts, you need to set: \n"
                         "spt_watershed_name, spt_subbasin_name, "
                         "spt_forecast_date_string \n"
                         "ckan_engine_url, ckan_api_key, "
                         "and ckan_owner_organization.")
Downloads Streamflow Prediction Tool forecast data
def setCenter(self, loc):
    offset = self.getCenter().getOffset(loc)
    return self.setLocation(self.getTopLeft().offset(offset))
Move this region so it is centered on ``loc``