code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def DualDBSystemCronJob(legacy_name=None, stateful=False):
    """Decorator that creates both AFF4 and RELDB cron jobs from a mixin.

    The AFF4 flow class is synthesized and installed into the mixin's own
    module under ``legacy_name`` (side effect), while the RELDB cron job
    class is returned to replace the decorated mixin.

    Args:
        legacy_name: Name to register the AFF4 class under. Required.
        stateful: If True, the AFF4 class derives from StatefulSystemCronFlow
            instead of SystemCronFlow.

    Returns:
        A class decorator producing the RELDB cron job class.

    Raises:
        ValueError: If legacy_name is missing, or the mixin already inherits
            from one of the base classes it is supposed to be combined with.
    """
    def Decorator(cls):
        if not legacy_name:
            raise ValueError("legacy_name has to be provided")
        # Pick the AFF4 base depending on whether the job keeps state.
        if stateful:
            aff4_base_cls = StatefulSystemCronFlow
        else:
            aff4_base_cls = SystemCronFlow
        # The mixin must be base-class agnostic: inheriting from either side
        # would create a diamond with the synthesized classes below.
        if issubclass(cls, cronjobs.SystemCronJobBase):
            raise ValueError("Mixin class shouldn't inherit from SystemCronJobBase")
        if issubclass(cls, aff4_base_cls):
            raise ValueError("Mixin class shouldn't inherit from %s" %
                             aff4_base_cls.__name__)
        # Build the AFF4 flavor and register it in the mixin's module so that
        # legacy code can still import it by its legacy name.
        aff4_cls = compatibility.MakeType(
            legacy_name, (cls, LegacyCronJobAdapterMixin, aff4_base_cls), {})
        module = sys.modules[cls.__module__]
        setattr(module, legacy_name, aff4_cls)
        # Build and return the RELDB flavor under the mixin's own name.
        reldb_cls = compatibility.MakeType(
            compatibility.GetName(cls), (cls, cronjobs.SystemCronJobBase), {})
        return reldb_cls
    return Decorator
Decorator that creates AFF4 and RELDB cronjobs from a given mixin.
def network_info():
    """Return the local hostname and its IPv4/IPv6 addresses.

    Addresses that cannot be resolved are left as ``None``.

    :return: dict with keys ``hostname``, ``ipv4`` and ``ipv6``.
    """
    hostname = socket.gethostname()
    info = {'hostname': hostname, 'ipv4': None, 'ipv6': None}
    # Resolve each address family independently; failures are non-fatal.
    for key, family in (('ipv4', socket.AF_INET), ('ipv6', socket.AF_INET6)):
        with suppress(IndexError, socket.gaierror):
            info[key] = socket.getaddrinfo(hostname, None, family)[0][4][0]
    return info
Returns hostname, ipv4 and ipv6.
def add_hbar_widget(self, ref, x=1, y=1, length=10):
    """Add a horizontal bar widget to this screen.

    If a widget with the same ``ref`` already exists it is returned
    unchanged instead of being re-created.

    :param ref: unique reference key for the widget.
    :param x: horizontal position (default 1).
    :param y: vertical position (default 1).
    :param length: bar length in cells (default 10).
    :return: the (possibly pre-existing) HBarWidget.
    """
    if ref not in self.widgets:
        self.widgets[ref] = widgets.HBarWidget(
            screen=self, ref=ref, x=x, y=y, length=length)
    return self.widgets[ref]
Add Horizontal Bar Widget
def serialize_iso(attr, **kwargs):
    """Serialize a Datetime object into an ISO-8601 formatted string.

    :param Datetime attr: Object to be serialized (a string is first parsed
        into a datetime via isodate).
    :rtype: str
    :raises SerializationError: if the datetime cannot be represented.
    :raises TypeError: if ``attr`` is not a valid Datetime object.
    """
    if isinstance(attr, str):
        attr = isodate.parse_datetime(attr)
    try:
        # Naive datetimes are serialized as-is, i.e. implicitly treated as UTC.
        if not attr.tzinfo:
            _LOGGER.warning(
                "Datetime with no tzinfo will be considered UTC.")
        utc = attr.utctimetuple()
        if utc.tm_year > 9999 or utc.tm_year < 1:
            raise OverflowError("Hit max or min date")
        # Pad to 6 digits, strip trailing zeros, then keep at least 3 digits
        # (millisecond precision) for the fractional-seconds part.
        microseconds = str(attr.microsecond).rjust(6,'0').rstrip('0').ljust(3, '0')
        if microseconds:
            microseconds = '.'+microseconds
        date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
            utc.tm_year, utc.tm_mon, utc.tm_mday,
            utc.tm_hour, utc.tm_min, utc.tm_sec)
        return date + microseconds + 'Z'
    except (ValueError, OverflowError) as err:
        msg = "Unable to serialize datetime object."
        raise_with_traceback(SerializationError, msg, err)
    except AttributeError as err:
        msg = "ISO-8601 object must be valid Datetime object."
        raise_with_traceback(TypeError, msg, err)
Serialize Datetime object into ISO-8601 formatted string. :param Datetime attr: Object to be serialized. :rtype: str :raises: SerializationError if format invalid.
def dec(self, key, delta=1):
    """Decrement the value stored under ``key`` by ``delta``.

    If the key does not yet exist it is initialized with ``-delta``.

    :param key: the key to decrement.
    :param delta: the amount to subtract (default 1).
    """
    current = self.get(key) or 0
    self.set(key, current - delta)
Decrements the value of a key by `delta`. If the key does not yet exist it is initialized with `-delta`. For supporting caches this is an atomic operation. :param key: the key to increment. :param delta: the delta to subtract.
def dumps(obj, *args, **kwargs):
    """Serialize an object to a JSON string.

    Forces ``object2dict`` as the fallback serializer for objects that
    :func:`json.dumps` cannot handle natively.

    :param obj: the object to dump.
    :param args: optional positional arguments forwarded to :func:`json.dumps`.
    :param kwargs: keyword arguments forwarded to :func:`json.dumps`.
    :return: the JSON string.
    """
    options = dict(kwargs, default=object2dict)
    return json.dumps(obj, *args, **options)
Serialize a object to string Basic Usage: >>> import simplekit.objson >>> obj = {'name':'wendy'} >>> print simplekit.objson.dumps(obj) :param obj: a object which need to dump :param args: Optional arguments that :func:`json.dumps` takes. :param kwargs: Keys arguments that :py:func:`json.dumps` takes. :return: string
def _datasource_cell(args, cell_body):
    """Implement the BigQuery ``%%bq datasource`` cell magic.

    Parses the cell body as a schema config, validates it, builds an
    ExternalDataSource and binds it to ``args['name']`` in the notebook
    environment (side effect; nothing is returned).

    Args:
        args: the arguments following '%%bq datasource' (name, paths,
            format, compressed).
        cell_body: the datasource's schema in json/yaml.
    """
    name = args['name']
    paths = args['paths']
    # Format defaults to CSV; BigQuery expects the lowercase form here.
    data_format = (args['format'] or 'CSV').lower()
    compressed = args['compressed'] or False
    # Get the source schema from the cell body and validate it against the
    # published BigQuery table-schema JSON schema.
    record = google.datalab.utils.commands.parse_config(
        cell_body, google.datalab.utils.commands.notebook_environment(),
        as_dict=False)
    jsonschema.validate(record, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
    schema = bigquery.Schema(record['schema'])
    # Create and bind the datasource object into the notebook namespace.
    datasource = bigquery.ExternalDataSource(
        source=paths, source_format=data_format, compressed=compressed,
        schema=schema)
    google.datalab.utils.commands.notebook_environment()[name] = datasource
Implements the BigQuery datasource cell magic for ipython notebooks. The supported syntax is %%bq datasource --name <var> --paths <url> [--format <CSV|JSON>] <schema> Args: args: the optional arguments following '%%bq datasource' cell_body: the datasource's schema in json/yaml
def _ParseCommon2003CachedEntry(self, value_data, cached_entry_offset):
    """Parse the cached entry structure common to Windows 2003, Vista and 7.

    Args:
        value_data (bytes): value data.
        cached_entry_offset (int): offset of the first cached entry data
            relative to the start of the value data.

    Returns:
        appcompatcache_cached_entry_2003_common: the parsed cached entry.

    Raises:
        ParseError: if the value data could not be parsed or the path size
            values are inconsistent.
    """
    data_type_map = self._GetDataTypeMap(
        'appcompatcache_cached_entry_2003_common')
    try:
        cached_entry = self._ReadStructureFromByteStream(
            value_data[cached_entry_offset:], cached_entry_offset,
            data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse cached entry value with error: {0!s}'.format(
                exception))
    # Sanity-check the path sizes before the caller reads the path string.
    if cached_entry.path_size > cached_entry.maximum_path_size:
        raise errors.ParseError('Path size value out of bounds.')
    path_end_of_string_size = (
        cached_entry.maximum_path_size - cached_entry.path_size)
    # A valid entry has a non-empty path followed by exactly one UTF-16
    # end-of-string character (2 bytes).
    if cached_entry.path_size == 0 or path_end_of_string_size != 2:
        raise errors.ParseError('Unsupported path size values.')
    return cached_entry
Parses the cached entry structure common for Windows 2003, Vista and 7. Args: value_data (bytes): value data. cached_entry_offset (int): offset of the first cached entry data relative to the start of the value data. Returns: appcompatcache_cached_entry_2003_common: cached entry structure common for Windows 2003, Windows Vista and Windows 7. Raises: ParseError: if the value data could not be parsed.
def add_enclosure(self, left_char, right_char):
    """Register a new enclosure pair of characters.

    The pair is stripped when detected at the beginning and end of a
    found URL.

    :param str left_char: left character of the enclosure pair, e.g. "("
    :param str right_char: right character of the enclosure pair, e.g. ")"
    """
    for name, char in (('left_char', left_char), ('right_char', right_char)):
        assert len(char) == 1, \
            "Parameter {} must be character not string".format(name)
    self._enclosure.add((left_char, right_char))
    # The set of characters that may follow a TLD depends on the enclosures,
    # so recompute it.
    self._after_tld_chars = self._get_after_tld_chars()
Add a new enclosure pair of characters. The pair is removed when its presence is detected at the beginning and end of a found URL. :param str left_char: left character of enclosure pair - e.g. "(" :param str right_char: right character of enclosure pair - e.g. ")"
def sas_logical_jbod_attachments(self):
    """Lazily build and return the SAS Logical JBOD Attachments client.

    The client is created on first access and cached for subsequent calls.

    Returns:
        SasLogicalJbodAttachments: the (cached) client instance.
    """
    if not self.__sas_logical_jbod_attachments:
        self.__sas_logical_jbod_attachments = SasLogicalJbodAttachments(self.__connection)
    return self.__sas_logical_jbod_attachments
Gets the SAS Logical JBOD Attachments client. Returns: SasLogicalJbodAttachments:
def get_cluster(
        self,
        project_id,
        region,
        cluster_name,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
):
    """Get the resource representation for a cluster in a project.

    Args:
        project_id (str): Required. The ID of the Google Cloud Platform
            project that the cluster belongs to.
        region (str): Required. The Cloud Dataproc region in which to handle
            the request.
        cluster_name (str): Required. The cluster name.
        retry (Optional[google.api_core.retry.Retry]): Retry object; ``None``
            disables retries.
        timeout (Optional[float]): Per-attempt timeout in seconds.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            provided to the method.

    Returns:
        A ``Cluster`` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed.
        google.api_core.exceptions.RetryError: If retries were exhausted.
        ValueError: If the parameters are invalid.
    """
    # Wrap the raw transport method once, attaching the configured default
    # retry/timeout policies; cached for subsequent calls.
    if "get_cluster" not in self._inner_api_calls:
        self._inner_api_calls[
            "get_cluster"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_cluster,
            default_retry=self._method_configs["GetCluster"].retry,
            default_timeout=self._method_configs["GetCluster"].timeout,
            client_info=self._client_info,
        )
    request = clusters_pb2.GetClusterRequest(
        project_id=project_id, region=region, cluster_name=cluster_name
    )
    return self._inner_api_calls["get_cluster"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Gets the resource representation for a cluster in a project. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.get_cluster(project_id, region, cluster_name) Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def create_org_smarthost(self, orgid, data):
    """Create an organization smarthost.

    :param orgid: id of the organization to attach the smarthost to.
    :param data: request body describing the smarthost.
    :return: the API call result.
    """
    endpoint = ENDPOINTS['orgsmarthosts']['new']
    return self.api_call(endpoint, dict(orgid=orgid), body=data)
Create an organization smarthost
def jsonify(*args, **kwargs):
    """Build a JSON response with support for MongoDB ObjectId.

    Arguments are combined into a single dict (same call convention as
    ``dict()``) and serialized with ``MongoJSONEncoder``.

    :return: a Response with mimetype ``application/json``.
    """
    payload = json.dumps(dict(*args, **kwargs), cls=MongoJSONEncoder)
    return Response(payload, mimetype='application/json')
jsonify with support for MongoDB ObjectId
def _preloop_hook(self) -> None:
    """Start the background alerter thread before the command loop runs.

    Clears the stop flag first so the thread's run loop (which presumably
    polls ``self._stop_thread`` -- TODO confirm) does not exit immediately.
    """
    self._stop_thread = False
    self._alerter_thread = threading.Thread(name='alerter',
                                            target=self._alerter_thread_func)
    self._alerter_thread.start()
Start the alerter thread
def _CompareFields(field, other_field):
    """Check whether two ProtoRPC fields are "equal".

    Compares the fields' attributes (rather than object identity, the
    default __eq__ behavior) as well as their classes.

    Args:
        field: A ProtoRPC message field to be compared.
        other_field: A ProtoRPC message field to be compared.

    Returns:
        Boolean indicating whether the fields are equal.
    """
    same_attrs = _GetFieldAttributes(field) == _GetFieldAttributes(other_field)
    return same_attrs and field.__class__ == other_field.__class__
Checks if two ProtoRPC fields are "equal". Compares the arguments, rather than the id of the elements (which is the default __eq__ behavior) as well as the class of the fields. Args: field: A ProtoRPC message field to be compared. other_field: A ProtoRPC message field to be compared. Returns: Boolean indicating whether the fields are equal.
def session_hook(exception):
    """Handle an error requiring re-authentication with specific identities.

    Expects an exception with an ``authorization_parameters`` field in its
    ``raw_json``; prints the session message and the required identities (if
    any), then exits with a status mapped from the HTTP status.

    Args:
        exception: API error carrying ``raw_json`` and ``http_status``.
    """
    safeprint(
        "The resource you are trying to access requires you to "
        "re-authenticate with specific identities."
    )
    params = exception.raw_json["authorization_parameters"]
    message = params.get("session_message")
    if message:
        safeprint("message: {}".format(message))
    identities = params.get("session_required_identities")
    if identities:
        id_str = " ".join(identities)
        safeprint(
            "Please run\n\n"
            " globus session update {}\n\n"
            "to re-authenticate with the required identities".format(id_str)
        )
    else:
        # BUG FIX: the original called .format(id_str) here, but id_str is
        # only bound in the branch above, so this raised NameError whenever
        # no required identities were present (and the string has no
        # placeholder anyway).
        safeprint(
            'Please use "globus session update" to re-authenticate '
            "with specific identities"
        )
    exit_with_mapped_status(exception.http_status)
Expects an exception with an authorization_parameters field in its raw_json
def atlasdb_format_query(query, values):
    """Turn a '?'-parameterized query into a string for printing.

    String values are wrapped in single quotes; other values are inserted
    as-is. Useful for debugging only -- this is NOT a safe way to build SQL
    for execution.

    Args:
        query: query string containing '?' placeholders.
        values: tuple of parameter values, one per placeholder.

    Returns:
        The query string with the values spliced in.
    """
    # Python 2/3 compatibility fix: the original referenced `unicode`, which
    # does not exist on Python 3.
    try:
        text_types = (str, unicode)  # Python 2
    except NameError:
        text_types = (str,)  # Python 3
    fragments = []
    # Exact type check (not isinstance) kept deliberately to preserve the
    # original behavior for str subclasses.
    for frag, val in zip(query.split("?"), tuple(values) + ("",)):
        rendered = "'%s'" % val if type(val) in text_types else val
        fragments.append("%s %s" % (frag, rendered))
    return "".join(fragments)
Turn a query into a string for printing. Useful for debugging.
def get_unread_message_count_between(parser, token):
    """Template tag: the unread message count between two users.

    Syntax::

        {% get_unread_message_count_between [user] and [user] as [var_name] %}
    """
    bits = token.contents.split(None, 1)
    if len(bits) != 2:
        raise template.TemplateSyntaxError(
            "%s tag requires arguments" % token.contents.split()[0])
    tag_name, arg = bits
    match = re.search(r'(.*?) and (.*?) as (\w+)', arg)
    if match is None:
        raise template.TemplateSyntaxError(
            "%s tag had invalid arguments" % tag_name)
    um_from_user, um_to_user, var_name = match.groups()
    return MessageCount(um_from_user, var_name, um_to_user)
Returns the unread message count between two users. Syntax:: {% get_unread_message_count_between [user] and [user] as [var_name] %} Example usage:: {% get_unread_message_count_between funky and wunki as message_count %}
def data(self, data):
    """Character-data callback: route text between tags per current state.

    Depending on ``self.state``, the text is stored as the audit record's
    source id (int), the parent element's datetime stamp, or the reason for
    change (empty/whitespace-only text becomes None). The state is always
    reset afterwards.

    Args:
        data: the character data between tags.
    """
    if self.state == STATE_SOURCE_ID:
        self.context.audit_record.source_id = int(data)
    elif self.state == STATE_DATETIME:
        dt = datetime.datetime.strptime(data, "%Y-%m-%dT%H:%M:%S")
        self.get_parent_element().datetimestamp = dt
    elif self.state == STATE_REASON_FOR_CHANGE:
        # Blank reasons are normalized to None.
        self.context.audit_record.reason_for_change = data.strip() or None
    # Consume the state so stray character data is ignored.
    self.state = STATE_NONE
Called for text between tags
def _tristate_parent(self, item): self.change_state(item, "tristate") parent = self.parent(item) if parent: self._tristate_parent(parent)
Put the box of item in tristate and change the state of the boxes of item's ancestors accordingly.
def get_picture(self, login=None, **kwargs):
    """Get a user's picture.

    :param str login: login of the user (a ``login`` keyword argument takes
        precedence; falls back to ``self._login``).
    :return: the raw response content (the picture data).
    """
    target = kwargs.get('login', login or self._login)
    url = PICTURE_URL.format(login=target)
    return self._request_api(url=url).content
Get a user's picture. :param str login: Login of the user to check :return: JSON
def fa2s2b(fastas):
    """Map each sequence id to the basename of the fasta file it came from.

    :param fastas: iterable of fasta file paths.
    :return: dict of sequence id -> file basename (path and extension
        stripped).
    """
    s2b = {}
    for fasta in fastas:
        # Strip the directory and the final extension from the file name.
        base = fasta.rsplit('/', 1)[-1].rsplit('.', 1)[0]
        for seq in parse_fasta(fasta):
            seq_id = seq[0].split('>', 1)[1].split()[0]
            s2b[seq_id] = base
    return s2b
convert fastas to s2b dictionary
def Execute(self, http):
    """Execute all the requests as a single batched HTTP request.

    After the batch completes, each per-request callback (and the global
    callback, if set) is invoked with the response and an HttpError for
    responses with status >= 300 (None otherwise).

    Args:
        http: A httplib2.Http object to be used with the request.

    Returns:
        None

    Raises:
        BatchError: if the response is the wrong format.
    """
    self._Execute(http)
    for key in self.__request_response_handlers:
        response = self.__request_response_handlers[key].response
        callback = self.__request_response_handlers[key].handler
        exception = None
        if response.status_code >= 300:
            exception = exceptions.HttpError.FromResponse(response)
        if callback is not None:
            callback(response, exception)
        if self.__callback is not None:
            self.__callback(response, exception)
Execute all the requests as a single batched HTTP request. Args: http: A httplib2.Http object to be used with the request. Returns: None Raises: BatchError if the response is the wrong format.
def set_env(user, name, value=None):
    """Set up an environment variable in a user's crontab.

    CLI Example:

    .. code-block:: bash

        salt '*' cron.set_env root MAILTO user@example.com

    Args:
        user: crontab owner.
        name: environment variable name.
        value: environment variable value.

    Returns:
        'present' if already set to this value, 'updated' if changed,
        'new' if added, or the stderr text if writing the crontab failed.
    """
    lst = list_tab(user)
    for env in lst['env']:
        if name == env['name']:
            if value != env['value']:
                # Replace by removing the old entry and re-adding; the
                # recursive call returns 'new', which we report as 'updated'.
                rm_env(user, name)
                jret = set_env(user, name, value)
                if jret == 'new':
                    return 'updated'
                else:
                    return jret
            return 'present'
    env = {'name': name, 'value': value}
    lst['env'].append(env)
    comdat = _write_cron_lines(user, _render_tab(lst))
    if comdat['retcode']:
        # Failed to commit, return the error
        return comdat['stderr']
    return 'new'
Set up an environment variable in the crontab. CLI Example: .. code-block:: bash salt '*' cron.set_env root MAILTO user@example.com
def _import_object(self, path, look_for_cls_method):
    """Import the module that contains the referenced callable.

    Args:
        path: python dotted path of the class/function.
        look_for_cls_method (bool): If True, treat the last part of the
            path as a class method name.

    Returns:
        Tuple of (class object, class name, method name or None).
    """
    # With a class method the path ends in "<Class>.<method>", so the module
    # boundary sits two components from the end instead of one.
    last_nth = 2 if look_for_cls_method else 1
    path = path.split('.')
    module_path = '.'.join(path[:-last_nth])
    class_name = path[-last_nth]
    module = importlib.import_module(module_path)
    # NOTE(review): `path[-last_nth:][0] == path[-last_nth]` is always true
    # when both index operations succeed, so this condition effectively
    # reduces to `look_for_cls_method` -- confirm intent.
    if look_for_cls_method and path[-last_nth:][0] == path[-last_nth]:
        class_method = path[-last_nth:][1]
    else:
        class_method = None
    return getattr(module, class_name), class_name, class_method
Imports the module that contains the referenced method. Args: path: python path of class/function look_for_cls_method (bool): If True, treat the last part of path as class method. Returns: Tuple. (class object, class name, method to be called)
def _get_satellite_tile(self, x_tile, y_tile, z_tile):
    """Load a single satellite image tile into the in-memory tile cache.

    The tile is fetched over HTTP and written to a local file cache on
    first use; the in-memory cache is bounded and evicts in insertion
    order once it exceeds _CACHE_SIZE.

    Args:
        x_tile: tile x coordinate.
        y_tile: tile y coordinate.
        z_tile: zoom level.
    """
    cache_file = "mapscache/{}.{}.{}.jpg".format(z_tile, x_tile, y_tile)
    if cache_file not in self._tiles:
        # Download the tile to the on-disk cache if we do not have it yet.
        if not os.path.isfile(cache_file):
            url = _IMAGE_URL.format(z_tile, x_tile, y_tile, _KEY)
            data = requests.get(url).content
            with open(cache_file, 'wb') as f:
                f.write(data)
        self._tiles[cache_file] = [
            x_tile, y_tile, z_tile,
            ColourImageFile(self._screen, cache_file, height=_START_SIZE,
                            dither=True, uni=self._screen.unicode_aware),
            True]
        # Bound the in-memory cache; popitem(False) drops the oldest entry
        # (presumably self._tiles is an OrderedDict -- TODO confirm).
        if len(self._tiles) > _CACHE_SIZE:
            self._tiles.popitem(False)
        self._screen.force_update()
Load up a single satellite image tile.
def _send_heartbeat_request(self):
    """Send a heartbeat request to the group coordinator.

    Returns:
        A Future resolved by the response handler, or an already-failed
        Future if the coordinator is unknown or its node is not ready.
    """
    if self.coordinator_unknown():
        e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)
        return Future().failure(e)
    elif not self._client.ready(self.coordinator_id, metadata_priority=False):
        e = Errors.NodeNotReadyError(self.coordinator_id)
        return Future().failure(e)
    # HeartbeatRequest v1 is only understood by brokers >= 0.11.0.
    version = 0 if self.config['api_version'] < (0, 11, 0) else 1
    request = HeartbeatRequest[version](self.group_id,
                                        self._generation.generation_id,
                                        self._generation.member_id)
    log.debug("Heartbeat: %s[%s] %s", request.group, request.generation_id,
              request.member_id)
    future = Future()
    _f = self._client.send(self.coordinator_id, request)
    _f.add_callback(self._handle_heartbeat_response, future, time.time())
    _f.add_errback(self._failed_request, self.coordinator_id, request, future)
    return future
Send a heartbeat request
def __diff_iterable(self, level, parents_ids=frozenset({})):
    """Report the difference between two iterables at this diff level.

    Items are compared positionally via zip_longest; missing positions on
    either side are reported as added/removed, matching positions recurse
    into __diff. ``parents_ids`` guards against infinite recursion on
    self-referencing structures.

    Args:
        level: the current diff level holding t1 and t2.
        parents_ids: frozenset of ids of objects already on the path.
    """
    subscriptable = self.__iterables_subscriptable(level.t1, level.t2)
    if subscriptable:
        child_relationship_class = SubscriptableIterableRelationship
    else:
        child_relationship_class = NonSubscriptableIterableRelationship
    # The sentinel fillvalue marks positions that exist on only one side.
    for i, (x, y) in enumerate(
            zip_longest(
                level.t1, level.t2, fillvalue=ListItemRemovedOrAdded)):
        if y is ListItemRemovedOrAdded:
            # t1 has an item at i that t2 lacks.
            change_level = level.branch_deeper(
                x,
                notpresent,
                child_relationship_class=child_relationship_class,
                child_relationship_param=i)
            self.__report_result('iterable_item_removed', change_level)
        elif x is ListItemRemovedOrAdded:
            # t2 has an item at i that t1 lacks.
            change_level = level.branch_deeper(
                notpresent,
                y,
                child_relationship_class=child_relationship_class,
                child_relationship_param=i)
            self.__report_result('iterable_item_added', change_level)
        else:
            # Both sides present: recurse, skipping objects already seen on
            # this path to avoid cycles.
            item_id = id(x)
            if parents_ids and item_id in parents_ids:
                continue
            parents_ids_added = add_to_frozen_set(parents_ids, item_id)
            next_level = level.branch_deeper(
                x,
                y,
                child_relationship_class=child_relationship_class,
                child_relationship_param=i)
            self.__diff(next_level, parents_ids_added)
Difference of iterables
def contentsMethod(self, contentFilter):
    """ARReport objects associated to the current Analysis Request.

    Only users holding one of the privileged roles (Manager, LabManager,
    Client, LabClerk) see any items; everyone else gets an empty list.
    """
    privileged = {'Manager', 'LabManager', 'Client', 'LabClerk'}
    membership = getToolByName(self.context, "portal_membership")
    user_roles = membership.getAuthenticatedMember().getRoles()
    if privileged.intersection(user_roles):
        return self.context.objectValues('ARReport')
    return []
ARReport objects associated to the current Analysis request. If the user is not a Manager or LabManager or Client, no items are displayed.
def _analyze_indexed_fields(indexed_fields): result = {} for field_name in indexed_fields: if not isinstance(field_name, basestring): raise TypeError('Field names must be strings; got %r' % (field_name,)) if '.' not in field_name: if field_name in result: raise ValueError('Duplicate field name %s' % field_name) result[field_name] = None else: head, tail = field_name.split('.', 1) if head not in result: result[head] = [tail] elif result[head] is None: raise ValueError('Field name %s conflicts with ancestor %s' % (field_name, head)) else: result[head].append(tail) return result
Internal helper to check a list of indexed fields. Args: indexed_fields: A list of names, possibly dotted names. (A dotted name is a string containing names separated by dots, e.g. 'foo.bar.baz'. An undotted name is a string containing no dots, e.g. 'foo'.) Returns: A dict whose keys are undotted names. For each undotted name in the argument, the dict contains that undotted name as a key with None as a value. For each dotted name in the argument, the dict contains the first component as a key with a list of remainders as values. Example: If the argument is ['foo.bar.baz', 'bar', 'foo.bletch'], the return value is {'foo': ['bar.baz', 'bletch'], 'bar': None}. Raises: TypeError if an argument is not a string. ValueError for duplicate arguments and for conflicting arguments (when an undotted name also appears as the first component of a dotted name).
def get_user(self, username="", ext_collections=False, ext_galleries=False):
    """Get user profile information.

    With no username and an authorization-code grant, the authenticated
    user's own profile is fetched via /user/whoami; otherwise the named
    user's profile is fetched.

    :param username: username to look up the profile of.
    :param ext_collections: include collection folder info.
    :param ext_galleries: include gallery folder info.
    :return: a populated User object.
    :raises DeviantartError: if no username is given and the grant type does
        not allow whoami.
    """
    if not username and self.standard_grant_type == "authorization_code":
        response = self._req('/user/whoami')
        u = User()
        u.from_dict(response)
    else:
        if not username:
            raise DeviantartError("No username defined.")
        else:
            response = self._req('/user/profile/{}'.format(username), {
                'ext_collections': ext_collections,
                'ext_galleries': ext_galleries
            })
            u = User()
            u.from_dict(response['user'])
    return u
Get user profile information :param username: username to lookup profile of :param ext_collections: Include collection folder info :param ext_galleries: Include gallery folder info
def clearLayout(layout):
    """Remove and delete all widgets in a Qt layout.

    Useful when opening a new file and everything must be cleared.

    Args:
        layout: the QLayout to empty.
    """
    while layout.count():
        child = layout.takeAt(0)
        widget = child.widget()
        # BUG FIX: layout items that are not widgets (spacers, nested
        # layouts) make widget() return None; the original crashed with
        # AttributeError calling deleteLater() on None.
        if widget is not None:
            widget.deleteLater()
Removes all widgets in the layout. Useful when opening a new file, want to clear everything.
def get_asset(self):
    """Get the ``Asset`` corresponding to this content.

    return: (osid.repository.Asset) - the asset
    *compliance: mandatory -- This method must be implemented.*

    Raises:
        errors.IllegalState: if no asset id is set on this content.
        errors.OperationFailed: if the repository service does not support
            asset lookup.
    """
    if not bool(self._my_map['assetId']):
        raise errors.IllegalState('asset empty')
    mgr = self._get_provider_manager('REPOSITORY')
    if not mgr.supports_asset_lookup():
        raise errors.OperationFailed('Repository does not support Asset lookup')
    lookup_session = mgr.get_asset_lookup_session(proxy=getattr(self, "_proxy", None))
    # Search across all repositories, not just the one holding this content.
    lookup_session.use_federated_repository_view()
    return lookup_session.get_asset(self.get_asset_id())
Gets the ``Asset`` corresponding to this content. return: (osid.repository.Asset) - the asset *compliance: mandatory -- This method must be implemented.*
def entry_breadcrumbs(entry):
    """Build the breadcrumb trail for an Entry.

    Produces year/month/day crumbs from the publication date (converted to
    local time when timezone-aware), followed by a crumb for the entry
    title.
    """
    date = entry.publication_date
    if is_aware(date):
        date = localtime(date)
    crumbs = [make(date) for make in (year_crumb, month_crumb, day_crumb)]
    crumbs.append(Crumb(entry.title))
    return crumbs
Breadcrumbs for an Entry.
def get_action_meanings(self):
    """Return the list of action meanings, ordered by ascending action key."""
    return [meaning for _, meaning in sorted(self._action_meanings.items())]
Return a list of actions meanings.
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int:
    """Return the number of shards.

    :param num_samples: Number of training data samples.
    :param samples_per_shard: Samples per shard.
    :param min_num_shards: Minimum number of shards.
    :return: Number of shards.
    """
    required = int(math.ceil(num_samples / samples_per_shard))
    return required if required > min_num_shards else min_num_shards
Returns the number of shards. :param num_samples: Number of training data samples. :param samples_per_shard: Samples per shard. :param min_num_shards: Minimum number of shards. :return: Number of shards.
def get_texts_box(texts, fs):
    """Approximate the bounding box of multiple texts.

    :param texts: iterable of text strings.
    :param fs: font size.
    :return: (font size, approximate width of the longest text).
    """
    longest = max(len(text) for text in texts)
    return (fs, text_len(longest, fs))
Approximation of multiple texts bounds
def _read_response(self, may_block=False):
    """Wait for a response to become available, and read it.

    Frames are read 8 bytes at a time: 7 payload bytes plus a flags byte.
    Payload frames carry RESP_PENDING_FLAG and a 5-bit sequence number;
    reading stops when a frame without the pending flag arrives, or when
    the sequence wraps back to 0 after data has been collected.

    Args:
        may_block: whether the initial wait may block.

    Returns:
        The accumulated payload bytes.
    """
    # Wait for the first pending frame and take its 7 payload bytes.
    res = self._waitfor_set(yubikey_defs.RESP_PENDING_FLAG, may_block)[:7]
    while True:
        this = self._read()
        flags = yubico_util.ord_byte(this[7])
        if flags & yubikey_defs.RESP_PENDING_FLAG:
            # Low 5 bits of the flags byte are the frame sequence number.
            seq = flags & 0b00011111
            if res and (seq == 0):
                # Sequence wrapped around to 0 after data was read: done.
                break
            res += this[:7]
        else:
            break
    # Acknowledge/clear the device state before returning.
    self._write_reset()
    return res
Wait for a response to become available, and read it.
def lookup(self, plain_src_ns):
    """Given a plain source namespace, return the corresponding Namespace.

    Resolution order: excluded set (miss), identity mapping when no rules
    are configured, exact-match table, then regex rules. A regex hit is
    memoized into the exact-match table; a total miss is memoized into the
    excluded set.

    Args:
        plain_src_ns: the source namespace string.

    Returns:
        The matching Namespace object, or None if it is not included.
    """
    # Previously-determined misses short-circuit here.
    if plain_src_ns in self._ex_namespace_set:
        return None
    # No mapping rules at all: everything maps to itself.
    if not self._regex_map and not self._plain:
        return Namespace(
            dest_name=plain_src_ns,
            source_name=plain_src_ns,
            include_fields=self._include_fields,
            exclude_fields=self._exclude_fields,
        )
    try:
        return self._plain[plain_src_ns]
    except KeyError:
        # Fall back to the regex rules; cache any hit for next time.
        for regex, namespace in self._regex_map:
            new_name = match_replace_regex(regex, plain_src_ns, namespace.dest_name)
            if not new_name:
                continue
            new_namespace = namespace.with_options(
                dest_name=new_name, source_name=plain_src_ns
            )
            self._add_plain_namespace(new_namespace)
            return new_namespace
        # No rule matched: remember the miss.
        self._ex_namespace_set.add(plain_src_ns)
        return None
Given a plain source namespace, return the corresponding Namespace object, or None if it is not included.
def scale_and_crop_with_ranges(
        im, size, size_range=None, crop=False, upscale=False, zoom=None,
        target=None, **kwargs):
    """easy_thumbnails processor accepting a flexible `size_range`.

    ``size_range`` indicates by how many pixels each dimension of ``size``
    may grow, letting the target size adapt to the image's aspect ratio in
    order to minimize cropping. Falls back to plain scale_and_crop when no
    range is given or a dimension is zero.

    Args:
        im: source image.
        size: (min_width, min_height) tuple.
        size_range: (extra_width, extra_height) slack, or None.
        crop, upscale, zoom, target: passed through to scale_and_crop.

    Returns:
        The processed image from scale_and_crop.
    """
    min_width, min_height = size
    if min_width == 0 or min_height == 0 or not size_range:
        return scale_and_crop(im, size, crop, upscale, zoom, target, **kwargs)
    max_width = min_width + size_range[0]
    max_height = min_height + size_range[1]
    # Aspect-ratio bounds of the flexible target box.
    min_ar = min_width * 1.0 / max_height
    max_ar = max_width * 1.0 / min_height
    img_width, img_height = [float(v) for v in im.size]
    img_ar = img_width/img_height
    # Clamp the target size to whichever box edge the image's ratio hits.
    if img_ar <= min_ar:
        size = (min_width, max_height)
    elif img_ar >= max_ar:
        size = (max_width, min_height)
    else:
        size = (max_width, max_height)
    return scale_and_crop(im, size, crop, upscale, zoom, target, **kwargs)
An easy_thumbnails processor that accepts a `size_range` tuple, which indicates that one or both dimensions can give by a number of pixels in order to minimize cropping.
def lines2file(lines, filename, encoding='utf-8'):
    """Write an iterable of lines to a file, one per line.

    Each line is followed by a newline character.

    :param lines: iterable of strings (without trailing newlines).
    :param filename: output file path.
    :param encoding: text encoding to use (default utf-8).
    """
    with codecs.open(filename, "w", encoding=encoding) as out:
        out.writelines(line + "\n" for line in lines)
Write an iterable of lines (e.g. a JSON stream) to a file, one line at a time, each followed by a newline.
def swap_dims(self, dims_dict):
    """Return a new DataArray with swapped dimensions.

    Parameters
    ----------
    dims_dict : dict-like
        Maps current dimension names to their new names. Each value must
        already be a coordinate on this array.

    Returns
    -------
    DataArray with swapped dimensions.

    See Also
    --------
    DataArray.rename
    Dataset.swap_dims
    """
    swapped = self._to_temp_dataset().swap_dims(dims_dict)
    return self._from_temp_dataset(swapped)
Returns a new DataArray with swapped dimensions. Parameters ---------- dims_dict : dict-like Dictionary whose keys are current dimension names and whose values are new names. Each value must already be a coordinate on this array. Returns ------- renamed : Dataset DataArray with swapped dimensions. See Also -------- DataArray.rename Dataset.swap_dims
def keep_only_sticked_and_selected_tabs(self):
    """Close all tabs except the currently active one and all sticky ones.

    Does nothing when the KEEP_ONLY_STICKY_STATES_OPEN config flag is off
    or when no page is selected. Pages are closed without deleting their
    underlying states.
    """
    if not global_gui_config.get_config_value('KEEP_ONLY_STICKY_STATES_OPEN', True):
        return
    page_id = self.view.notebook.get_current_page()
    # -1 means no page is currently selected.
    if page_id == -1:
        return
    page = self.view.notebook.get_nth_page(page_id)
    current_state_identifier = self.get_state_identifier_for_page(page)
    # Collect first, then close: closing mutates self.tabs.
    states_to_be_closed = []
    for state_identifier, tab_info in list(self.tabs.items()):
        if current_state_identifier == state_identifier:
            continue
        if tab_info['is_sticky']:
            continue
        states_to_be_closed.append(state_identifier)
    for state_identifier in states_to_be_closed:
        self.close_page(state_identifier, delete=False)
Close all tabs, except the currently active one and all sticked ones
def createDataChannel(self, label, maxPacketLifeTime=None, maxRetransmits=None,
                      ordered=True, protocol='', negotiated=False, id=None):
    """Create a data channel with the given label.

    The SCTP transport is created lazily on first use.

    :param label: channel label.
    :param maxPacketLifeTime: max time (ms) to retransmit; mutually
        exclusive with maxRetransmits.
    :param maxRetransmits: max number of retransmissions; mutually
        exclusive with maxPacketLifeTime.
    :param ordered: whether messages are delivered in order.
    :param protocol: sub-protocol name.
    :param negotiated: whether the channel is negotiated out-of-band.
    :param id: explicit channel id (note: shadows the builtin, kept for
        API compatibility).
    :rtype: :class:`RTCDataChannel`
    :raises ValueError: if both reliability limits are given.
    """
    if maxPacketLifeTime is not None and maxRetransmits is not None:
        raise ValueError('Cannot specify both maxPacketLifeTime and maxRetransmits')
    if not self.__sctp:
        self.__createSctpTransport()
    parameters = RTCDataChannelParameters(
        id=id, label=label, maxPacketLifeTime=maxPacketLifeTime,
        maxRetransmits=maxRetransmits, negotiated=negotiated,
        ordered=ordered, protocol=protocol)
    return RTCDataChannel(self.__sctp, parameters)
Create a data channel with the given label. :rtype: :class:`RTCDataChannel`
def create_module(sym, data_shapes, label_shapes, label_names, gpus=''):
    """Create and bind a new MXNet module for inference.

    Parameters
    ----------
    sym : Symbol
        An MXNet symbol.
    data_shapes : list of (str, tuple)
        Pairs of input variable name and its shape.
    label_shapes : list of (str, tuple)
        Typically ``data_iter.provide_label``.
    label_names : list of str
        Names of the output labels in the symbolic graph.
    gpus : str
        Comma-separated GPU ids, e.g. '3,5,6'. Empty string means CPU.

    Returns
    -------
    A bound (inference-only) MXNet module.
    """
    # Choose the execution context(s): CPU by default, else listed GPUs.
    if gpus == '':
        devices = mx.cpu()
    else:
        devices = [mx.gpu(int(i)) for i in gpus.split(',')]
    data_names = [data_shape[0] for data_shape in data_shapes]
    mod = mx.mod.Module(
        symbol=sym,
        data_names=data_names,
        context=devices,
        label_names=label_names
    )
    mod.bind(
        for_training=False,
        data_shapes=data_shapes,
        label_shapes=label_shapes
    )
    return mod
Creates a new MXNet module. Parameters ---------- sym : Symbol An MXNet symbol. input_shape: tuple The shape of the input data in the form of (batch_size, channels, height, width) files: list of strings List of URLs pertaining to files that need to be downloaded in order to use the model. data_shapes: list of tuples. List of tuples where each tuple is a pair of input variable name and its shape. label_shapes: list of (str, tuple) Typically is ``data_iter.provide_label``. label_names: list of str Name of the output labels in the MXNet symbolic graph. gpus: str Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6. If empty, we use CPU. Returns ------- MXNet module
def visit_slice(self, node):
    """Return an astroid Slice node rendered as source text."""
    def render(child):
        # Absent bounds render as the empty string ("a[:]" style).
        return child.accept(self) if child else ""
    lower = render(node.lower)
    upper = render(node.upper)
    step = render(node.step)
    if step:
        return "{}:{}:{}".format(lower, upper, step)
    return "{}:{}".format(lower, upper)
return an astroid.Slice node as string
async def get_message(self, ignore_subscribe_messages=False, timeout=0):
    """Get the next pub/sub message if one is available, otherwise None.

    If ``timeout`` is specified (as a float, in seconds), waits that long
    for a message before giving up.
    """
    raw = await self.parse_response(block=False, timeout=timeout)
    if not raw:
        return None
    return self.handle_message(raw, ignore_subscribe_messages)
Get the next message if one is available, otherwise None. If timeout is specified, the system will wait for `timeout` seconds before returning. Timeout should be specified as a floating point number.
def revert(self, unchanged_only=False):
    """Revert all files in this changelist.

    :param unchanged_only: Only revert unchanged files
    :type unchanged_only: bool
    :raises: :class:`.ChangelistError` if the changelist was already
        reverted.
    """
    if self._reverted:
        raise errors.ChangelistError('This changelist has been reverted')
    # Changelist number 0 is spelled 'default' on the p4 command line.
    change = self._change
    if self._change == 0:
        change = 'default'
    cmd = ['revert', '-c', str(change)]
    if unchanged_only:
        cmd.append('-a')
    files = [f.depotFile for f in self._files]
    if files:
        cmd += files
    self._connection.run(cmd)
    # Mark this object as spent so a second revert is rejected.
    self._files = []
    self._reverted = True
Revert all files in this changelist :param unchanged_only: Only revert unchanged files :type unchanged_only: bool :raises: :class:`.ChangelistError`
def estimate(coll, filter=None, sample=1):
    """Estimate the number of documents in ``coll`` matching ``filter``.

    ``sample`` may be a fixed number of documents to sample (> 1) or a
    fraction of the total collection size (<= 1). With no filter and full
    sampling, the collection's own estimate is returned directly.

    Args:
        coll: pymongo collection.
        filter: optional query document (None means match everything).
        sample: sample size (count or fraction).

    Returns:
        Estimated matching document count as an int.
    """
    # BUG FIX: the original used a mutable default argument (filter={}).
    if filter is None:
        filter = {}
    total = coll.estimated_document_count()
    if not filter and sample == 1:
        return total
    if sample <= 1:
        # BUG FIX: $sample requires an integral size; the original passed a
        # float after scaling by the collection size. Clamp to at least 1
        # to avoid a zero-division below on empty collections.
        sample = max(int(sample * total), 1)
    pipeline = list(builtins.filter(None, [
        {'$sample': {'size': sample}} if sample < total else {},
        {'$match': filter},
        {'$count': 'matched'},
    ]))
    # BUG FIX: $count emits no document at all when nothing matches; the
    # original raised StopIteration instead of returning 0.
    docs = next(coll.aggregate(pipeline), {'matched': 0})
    ratio = docs['matched'] / sample
    return int(total * ratio)
Estimate the number of documents in the collection matching the filter. Sample may be a fixed number of documents to sample or a percentage of the total collection size. >>> coll = getfixture('bulky_collection') >>> estimate(coll) 100 >>> query = {"val": {"$gte": 50}} >>> val = estimate(coll, filter=query) >>> val > 0 True >>> val = estimate(coll, filter=query, sample=10) >>> val > 0 True >>> val = estimate(coll, filter=query, sample=.1) >>> val > 0 True
def variance(self, param):
    """Return the variance in a given parameter as found by the fit.

    :param param: ``Parameter`` instance.
    :return: variance of ``param``, or None when no covariance matrix is
        available.
    """
    idx = self.model.params.index(param)
    try:
        return self.covariance_matrix[idx, idx]
    except TypeError:
        # No covariance matrix was computed (e.g. the fit failed).
        return None
Return the variance in a given parameter as found by the fit. :param param: ``Parameter`` Instance. :return: Variance of ``param``.
def _resize(self, init=False):
    """Adjust the scroll offset so the current selection stays visible.

    If the selection is at the end of the list, try to scroll down.

    Args:
        init: when True, additionally snap the view to the end of the list
            for selections past the visible window.
    """
    col, row = self._selection_to_col_row(self.selection)
    # Only adjust when the selected row is outside the visible window
    # [startPos, startPos + list_maxY - 1].
    if not (self.startPos <= row <= self.startPos + self.list_maxY - 1):
        # NOTE(review): these two loops first raise startPos up to `row`,
        # then lower it until row is the last visible line -- the combined
        # effect looks intentional but is worth confirming against the UI.
        while row > self.startPos:
            self.startPos += 1
        while row < self.startPos + self.list_maxY - 1:
            self.startPos -= 1
    if init and row > self.list_maxY:
        # On first draw, prefer showing the tail of the list.
        new_startPos = self._num_of_rows - self.list_maxY + 1
        if row > new_startPos:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('setting startPos at {}'.format(new_startPos))
            self.startPos = new_startPos
    self.refresh_selection()
If the selection is at the end of the list, try to scroll down.
def set_hierarchy(self, hierarchy):
    """Install an alternative sonority hierarchy.

    ``hierarchy`` is a list of tiers ordered by decreasing consonantality;
    every phoneme is mapped to the index of its tier. Note that the vowel
    set must also be configured (via set_vowels) for nucleus detection.
    """
    self.hierarchy = {
        phoneme: rank
        for rank, tier in enumerate(hierarchy)
        for phoneme in tier
    }
Sets an alternative sonority hierarchy, note that you will also need to specify the vowelset with the set_vowels, in order for the module to correctly identify each nucleus. The order of the phonemes defined is by decreased consonantality Example: >>> s = Syllabifier() >>> s.set_hierarchy([['i', 'u'], ['e'], ['a'], ['r'], ['m', 'n'], ['f']]) >>> s.set_vowels(['i', 'u', 'e', 'a']) >>> s.syllabify('feminarum') ['fe', 'mi', 'na', 'rum']
def open(self):
    """Open an existing cache graph and mark this object as open.

    Raises:
        InvalidCacheException: if the cache does not exist or cannot be
            opened.
    """
    try:
        # create=False: we only open pre-existing caches here.
        self.graph.open(self.cache_uri, create=False)
        self._add_namespaces(self.graph)
        self.is_open = True
    except Exception:
        # NOTE(review): the broad catch discards the original cause; chaining
        # it (raise ... from exc) would aid debugging.
        raise InvalidCacheException('The cache is invalid or not created')
Opens an existing cache.
def execute(self, env, args):
    """Create a new task, optionally cloning an existing one.

    Unless ``--skip-edit`` was given, the new task's config file is opened
    in an editor for confirmation.

    `env`
        Runtime ``Environment`` instance.
    `args`
        Arguments object from arg parser (task_name, clone_task, skip_edit).

    Raises:
        errors.FocusError: if the task cannot be created or its config
            cannot be opened.
    """
    task_name = args.task_name
    clone_task = args.clone_task
    if not env.task.create(task_name, clone_task):
        raise errors.FocusError(u'Could not create task "{0}"'
                                .format(task_name))
    if not args.skip_edit:
        task_config = env.task.get_config_path(task_name)
        if not _edit_task_config(env, task_config, confirm=True):
            raise errors.FocusError(u'Could not open task config: {0}'
                                    .format(task_config))
Creates a new task. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser.
def _find_elements(self, result, elements): element_mapping = {} result = StringIO.StringIO(result) for _, e in ET.iterparse(result, events=('end',)): if not elements: break if e.tag in elements: element_mapping[e.tag] = e.text elements.remove(e.tag) return element_mapping
Find interesting elements from XML. This function tries to only look for specified elements without parsing the entire XML. The specified elements is better located near the beginning. Args: result: response XML. elements: a set of interesting element tags. Returns: A dict from element tag to element value.
def hook_alias(self, alias, model_obj=None):
    """Return the alias function, if it exists and applies to this model.

    Args:
        alias: name of the search alias to look up.
        model_obj: optional model class to check applicability against.

    Returns:
        The alias function produced by the alias's prepare().

    Raises:
        AttributeError: if no alias with that name is registered.
        ValueError: if the alias is restricted to models that do not match
            this search's model/doc types.
    """
    try:
        search_alias = self._alias_hooks[alias]
    except KeyError:
        raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
    else:
        # An alias with a model restriction must match either the explicit
        # model_obj or at least one of this search's doc types.
        if search_alias._applicable_models and \
           ((model_obj and model_obj not in search_alias._applicable_models) or \
           not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
            raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
        return search_alias.prepare(self, model_obj).alias_for
Returns the alias function, if it exists and if it can be applied to this model.
def get_spectra_id(self, fn_id, retention_time=None, scan_nr=None):
    """Return the spectra_id for the given mzML file id, optionally filtered
    by retention time and/or scan number.
    """
    cur = self.get_cursor()
    query = 'SELECT spectra_id FROM mzml WHERE mzmlfile_id=? '
    params = [fn_id]
    # Append optional filters; parameter order matches placeholder order.
    if retention_time is not None:
        query += ' AND retention_time=?'
        params.append(retention_time)
    if scan_nr is not None:
        query += ' AND scan_nr=?'
        params.append(scan_nr)
    cur.execute(query, tuple(params))
    return cur.fetchone()[0]
Returns spectra id for spectra filename and retention time
def coverage(self):
    """Get the fraction of this title sequence that is matched by its reads.

    Returns:
        float: fraction of the title sequence covered by at least one HSP.
    """
    intervals = ReadIntervals(self.subjectLength)
    # Add each matched region; ReadIntervals merges overlapping spans.
    for hsp in self.hsps():
        intervals.add(hsp.subjectStart, hsp.subjectEnd)
    return intervals.coverage()
Get the fraction of this title sequence that is matched by its reads. @return: The C{float} fraction of the title sequence matched by its reads.
def manages(self, cfg_part):
    """Tell whether this satellite manages the given configuration part.

    A part is managed when one of the satellite's managed configurations has
    the same ``managed_conf_id`` and ``push_flavor`` as ``cfg_part``.

    :param cfg_part: configuration part as prepared by the Dispatcher
    :type cfg_part: Conf
    :return: True if the satellite manages this configuration part
    :rtype: bool
    """
    logger.debug("Do I (%s/%s) manage: %s, my managed configuration(s): %s",
                 self.type, self.name, cfg_part, self.cfg_managed)
    if not self.cfg_managed:
        # Nothing has been assigned to this satellite yet.
        logger.info("I (%s/%s) do not manage (yet) any configuration!",
                    self.type, self.name)
        return False
    for managed_cfg in list(self.cfg_managed.values()):
        if managed_cfg['managed_conf_id'] == cfg_part.instance_id \
                and managed_cfg['push_flavor'] == cfg_part.push_flavor:
            logger.debug("I do manage this configuration: %s", cfg_part)
            break
    else:
        # for/else: no managed configuration matched.
        logger.warning("I (%s/%s) do not manage this configuration: %s",
                       self.type, self.name, cfg_part)
        return False
    return True
Tell if the satellite is managing this configuration part The managed configuration is formed as a dictionary indexed on the link instance_id: { u'SchedulerLink_1': { u'hash': u'4d08630a3483e1eac7898e7a721bd5d7768c8320', u'push_flavor': u'4d08630a3483e1eac7898e7a721bd5d7768c8320', u'managed_conf_id': [u'Config_1'] } } Note that the managed configuration is a string array rather than a simple string; there is no special reason for this — it is probably due to the serialization applied when the configuration is pushed. :param cfg_part: configuration part as prepared by the Dispatcher :type cfg_part: Conf :return: True if the satellite manages this configuration :rtype: bool
def Scan(self, scan_context, auto_recurse=True, scan_path_spec=None):
    """Scans for supported formats.

    Args:
        scan_context (SourceScannerContext): source scanner context.
        auto_recurse (Optional[bool]): True if the scan should automatically
            recurse as far as possible.
        scan_path_spec (Optional[PathSpec]): path specification to continue
            scanning from; None means resume at the first unscanned node.

    Raises:
        ValueError: if the scan context is invalid.
    """
    if not scan_context:
        raise ValueError('Invalid scan context.')
    scan_context.updated = False
    if scan_path_spec:
        scan_node = scan_context.GetScanNode(scan_path_spec)
    else:
        # No explicit starting point: resume at the next unscanned node.
        scan_node = scan_context.GetUnscannedScanNode()
    if scan_node:
        self._ScanNode(scan_context, scan_node, auto_recurse=auto_recurse)
Scans for supported formats. Args: scan_context (SourceScannerContext): source scanner context. auto_recurse (Optional[bool]): True if the scan should automatically recurse as far as possible. scan_path_spec (Optional[PathSpec]): path specification to indicate where the source scanner should continue scanning, where None indicates the scanner will start with the sources. Raises: ValueError: if the scan context is invalid.
def bargraph(data, max_key_width=30):
    """Return a bar graph as a string, given a dict of label -> numeric value.

    Keys are truncated to ``max_key_width``; bar widths are scaled so each
    line fits the current terminal width.
    """
    lines = []
    # Width of the key column, capped at max_key_width.
    max_length = min(max(len(key) for key in data.keys()), max_key_width)
    max_val = max(data.values())
    max_val_length = max(
        len(_style_value(val)) for val in data.values())
    term_width = get_terminal_size()[0]
    # Space left for the bar after the key, value and fixed punctuation.
    max_bar_width = term_width - MARGIN - (max_length + 3 + max_val_length + 3)
    template = u"{key:{key_width}} [ {value:{val_width}} ] {bar}"
    for key, value in data.items():
        try:
            bar = int(math.ceil(max_bar_width * value / max_val)) * TICK
        except ZeroDivisionError:
            # All values are zero; draw no bar.
            bar = ''
        line = template.format(
            key=key[:max_length],
            value=_style_value(value),
            bar=bar,
            key_width=max_length,
            val_width=max_val_length
        )
        lines.append(line)
    return '\n'.join(lines)
Return a bar graph as a string, given a dictionary of data.
def gen500(request, baseURI, project=None):
    """Return a rendered 500 error response for the plugIt proxy.

    The template context carries the base URL and the viewer's user mode
    from the session (defaults to anonymous, 'ano').
    """
    return HttpResponseServerError(
        render_to_response('plugIt/500.html', {
            'context': {
                'ebuio_baseUrl': baseURI,
                'ebuio_userMode': request.session.get('plugit-standalone-usermode', 'ano'),
            },
            'project': project
        }, context_instance=RequestContext(request)))
Return a 500 error
def all_hosts(self):
    """Set of hosts, passives, and arbiters known to this server.

    Each address is normalised with ``common.clean_node``.
    """
    # Python 2 imap: lazily normalise every address from the three lists.
    return set(imap(common.clean_node, itertools.chain(
        self._doc.get('hosts', []),
        self._doc.get('passives', []),
        self._doc.get('arbiters', []))))
List of hosts, passives, and arbiters known to this server.
def home_win_percentage(self):
    """Fraction (0-1) of completed games won by the home team, rounded to
    three decimal places. Returns 0.0 when no games have been played.
    """
    games_played = float(self.home_wins + self.home_losses)
    try:
        ratio = float(self.home_wins) / games_played
    except ZeroDivisionError:
        # No games played yet.
        return 0.0
    return round(ratio, 3)
Returns a ``float`` of the percentage of games the home team has won after the conclusion of the game. Percentage ranges from 0-1.
def send_rally_points(self):
    """Send all rally points from the rally loader to the vehicle.

    Writes RALLY_TOTAL first (param type 3) so the autopilot expects the
    right number of points, then uploads each point in order.
    """
    self.mav_param.mavset(self.master, 'RALLY_TOTAL', self.rallyloader.rally_count(), 3)
    for i in range(self.rallyloader.rally_count()):
        self.send_rally_point(i)
send rally points from rallyloader
def num_throats(self, labels='all', mode='union'):
    r"""Return the number of throats matching the specified labels.

    Parameters
    ----------
    labels : list of strings, optional
        Throat labels included in the count; all throats when omitted.
    mode : string, optional
        How multiple labels combine ('union', 'intersection',
        'exclusive_or', ...); forwarded to ``_get_indices``.

    Returns
    -------
    Nt : int
        Number of throats with the specified labels.
    """
    # Fix: the body previously began with a stray bare `r` token (the
    # orphaned prefix of a removed raw docstring), which raised NameError
    # at call time; the raw docstring is restored above.
    Ts = self._get_indices(labels=labels, mode=mode, element='throat')
    Nt = sp.shape(Ts)[0]
    return Nt
r""" Return the number of throats of the specified labels Parameters ---------- labels : list of strings, optional The throat labels that should be included in the count. If not supplied, all throats are counted. mode : string, optional Specifies how the count should be performed. The options are: **'or', 'union', 'any'** : (default) Throats with *one or more* of the given labels are counted. **'and', 'intersection', 'all'** : Throats with *all* of the given labels are counted. **'xor', 'exclusive_or'** : Throats with *only one* of the given labels are counted. **'nor', 'none', 'not'** : Throats with *none* of the given labels are counted. **'nand'** : Throats with *some but not all* of the given labels are counted. **'xnor'** : Throats with *more than one* of the given labels are counted. Returns ------- Nt : int Number of throats with the specified labels See Also -------- num_pores count Notes ----- Technically, *'nand'* and *'xnor'* should also count throats with *none* of the labels, however, to make the count more useful these are not included.
def pull(self, platform=None):
    """Pull the image by digest.

    Args:
        platform (str): the platform to pull the image for. Default: ``None``.

    Returns:
        (:py:class:`Image`): a reference to the pulled image.
    """
    repository, _ = parse_repository_tag(self.image_name)
    # self.id is the digest, passed as the "tag" for a pull-by-digest.
    return self.collection.pull(repository, tag=self.id, platform=platform)
Pull the image digest. Args: platform (str): The platform to pull the image for. Default: ``None`` Returns: (:py:class:`Image`): A reference to the pulled image.
def run_band_structure(self, paths, with_eigenvectors=False,
                       with_group_velocities=False, is_band_connection=False,
                       path_connections=None, labels=None,
                       is_legacy_plot=False):
    """Run phonon band structure calculation.

    Parameters
    ----------
    paths : List of array_like
        Sets of q-points, shape (qpoints, 3) each; counts may differ.
    with_eigenvectors : bool, optional
        Whether eigenvectors are calculated. Default is False.
    with_group_velocities : bool, optional
        Whether group velocities are calculated. Default is False.
    is_band_connection : bool, optional
        Whether bands are connected by eigenvector similarity. Default False.
    path_connections : List of bool, optional
        For plotting only: whether each path connects to the next.
    labels : List of str, optional
        For plotting only: labels of path end points.
    is_legacy_plot : bool, optional
        Use the old-style band structure plot. Default is False.

    Raises
    ------
    RuntimeError
        If the dynamical matrix has not been built yet.
    """
    if self._dynamical_matrix is None:
        msg = ("Dynamical matrix has not yet built.")
        raise RuntimeError(msg)
    if with_group_velocities:
        # Lazily construct the group-velocity calculator on first use.
        if self._group_velocity is None:
            self._set_group_velocity()
        group_velocity = self._group_velocity
    else:
        group_velocity = None
    self._band_structure = BandStructure(
        paths,
        self._dynamical_matrix,
        with_eigenvectors=with_eigenvectors,
        is_band_connection=is_band_connection,
        group_velocity=group_velocity,
        path_connections=path_connections,
        labels=labels,
        is_legacy_plot=is_legacy_plot,
        factor=self._factor)
Run phonon band structure calculation. Parameters ---------- paths : List of array_like Sets of qpoints that can be passed to phonopy.set_band_structure(). Numbers of qpoints can be different. shape of each array_like : (qpoints, 3) with_eigenvectors : bool, optional Flag whether eigenvectors are calculated or not. Default is False. with_group_velocities : bool, optional Flag whether group velocities are calculated or not. Default is False. is_band_connection : bool, optional Flag whether each band is connected or not. This is achieved by comparing similarity of eigenvectors of neighboring points. Sometimes this fails. Default is False. path_connections : List of bool, optional This is only used in graphical plot of band structure and gives whether each path is connected to the next path or not, i.e., if False, there is a jump of q-points. The number of elements is the same as that of paths. Default is None. labels : List of str, optional This is only used in graphical plot of band structure and gives labels of end points of each path. The number of labels is equal to (2 - np.array(path_connections)).sum(). is_legacy_plot: bool, optional This makes the old style band structure plot. Default is False.
def githubWebHookConsumer(self, *args, **kwargs):
    """Consume a GitHub WebHook.

    Capture a GitHub event and publish it via pulse, if it's a push,
    release or pull request.

    This method is ``experimental``.
    """
    return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
Consume GitHub WebHook Capture a GitHub event and publish it via pulse, if it's a push, release or pull request. This method is ``experimental``
def _forgiving_issubclass(derived_class, base_class):
    """Forgiving version of ``issubclass``.

    Returns False instead of raising when either argument is not an
    old-style (Python 2) class.
    """
    return (type(derived_class) is ClassType and \
        type(base_class) is ClassType and \
        issubclass(derived_class, base_class))
Forgiving version of ``issubclass`` Does not throw any exception when arguments are not of class type
def readTempC(self):
    """Return the thermocouple temperature value in degrees celsius.

    Returns NaN when any of the low three bits of the raw reading is set
    (presumably sensor fault flags — confirm against the datasheet).
    """
    v = self._read32()
    # Any of the three lowest bits set -> report NaN.
    if v & 0x7:
        return float('NaN')
    if v & 0x80000000:
        # Sign bit set: drop the low 18 bits, then subtract 2**14 to
        # recover the signed 14-bit value.
        v >>= 18
        v -= 16384
    else:
        # Positive value: temperature data is in the top 14 bits.
        v >>= 18
    # Each count represents 0.25 degrees C.
    return v * 0.25
Return the thermocouple temperature value in degrees celsius.
def transform_y(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
    "Set `tfms` to be applied to the ys of the train and validation set."
    # A falsy value (None/empty) means "no transforms" for both splits.
    if not tfms: tfms=(None,None)
    self.train.transform_y(tfms[0], **kwargs)
    self.valid.transform_y(tfms[1], **kwargs)
    # The test set, when present, reuses the validation transforms.
    if self.test: self.test.transform_y(tfms[1], **kwargs)
    return self
Set `tfms` to be applied to the ys of the train and validation set.
def lonlat2xyz(lon, lat):
    """Convert longitude/latitude in degrees to cartesian coordinates on the
    unit sphere.
    """
    lon_rad = xu.deg2rad(lon)
    lat_rad = xu.deg2rad(lat)
    cos_lat = xu.cos(lat_rad)
    return (cos_lat * xu.cos(lon_rad),
            cos_lat * xu.sin(lon_rad),
            xu.sin(lat_rad))
Convert lon lat to cartesian.
def _get_value(scikit_value, mode = 'regressor', scaling = 1.0, n_classes = 2, tree_index = 0):
    """Get the right value from a scikit-learn tree leaf.

    For regressors: the scaled prediction. For binary classifiers: a single
    scaled score. For multi-class: normalised class fractions, or a dict
    keyed by ``tree_index`` with the scaled vector.
    """
    if mode == 'regressor':
        return scikit_value[0] * scaling
    if n_classes == 2:
        # Binary classification.
        if len(scikit_value[0]) != 1:
            # Two-column leaf: use the positive-class fraction.
            value = scikit_value[0][1] * scaling / scikit_value[0].sum()
        else:
            value = scikit_value[0][0] * scaling
        if value == 0.5:
            # NOTE(review): nudges exact 0.5 scores off the tie value —
            # presumably so downstream thresholding is unambiguous; confirm.
            value = value - 1e-7
    else:
        # Multi-class.
        if len(scikit_value[0]) != 1:
            value = scikit_value[0] / scikit_value[0].sum()
        else:
            value = {tree_index: scikit_value[0] * scaling}
    return value
Get the right value from the scikit-tree
def re_thresh_csv(path, old_thresh, new_thresh, chan_thresh):
    """Remove detections by re-applying a higher MAD threshold.

    Threshold lowering has no effect: a detection is kept only when it has
    at least ``chan_thresh`` channels and its detection value still exceeds
    its own threshold re-scaled by ``new_thresh / old_thresh``.

    :type path: str
    :param path: Path to the .csv detection file
    :type old_thresh: float
    :param old_thresh: Old threshold MAD multiplier
    :type new_thresh: float
    :param new_thresh: New threshold MAD multiplier
    :type chan_thresh: int
    :param chan_thresh: Minimum number of channels for a detection

    :returns: List of detections
    :rtype: list

    .. Note:: Legacy function; use
        ``eqcorrscan.core.match_filter.Party.rethreshold`` instead.
    .. Warning:: Only works if thresholding was done by MAD.
    """
    from eqcorrscan.core.match_filter import read_detections
    warnings.warn('Legacy function, please use '
                  'eqcorrscan.core.match_filter.Party.rethreshold.')
    old_detections = read_detections(path)
    old_thresh = float(old_thresh)
    new_thresh = float(new_thresh)
    detections = []
    detections_in = 0
    detections_out = 0
    for detection in old_detections:
        detections_in += 1
        # Re-scale the detection's own threshold to the new MAD multiplier.
        required_thresh = (new_thresh / old_thresh) * detection.threshold
        # Fix: the original also included `con1`, a *float* used directly as
        # a boolean in all([...]) — truthy for any non-zero threshold, so it
        # silently did nothing. Only the channel-count and re-scaled
        # threshold checks are meaningful. (Also fixes the
        # `requirted_thresh` typo.)
        if detection.no_chans >= chan_thresh and \
           abs(detection.detect_val) >= required_thresh:
            detections_out += 1
            detections.append(detection)
    print('Read in %i detections' % detections_in)
    print('Left with %i detections' % detections_out)
    return detections
Remove detections by changing the threshold. Can only be done to remove detection by increasing threshold, threshold lowering will have no effect. :type path: str :param path: Path to the .csv detection file :type old_thresh: float :param old_thresh: Old threshold MAD multiplier :type new_thresh: float :param new_thresh: New threshold MAD multiplier :type chan_thresh: int :param chan_thresh: Minimum number of channels for a detection :returns: List of detections :rtype: list .. rubric:: Example >>> from eqcorrscan.utils.clustering import re_thresh_csv >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> det_file = os.path.join(TEST_PATH, 'expected_tutorial_detections.txt') >>> detections = re_thresh_csv(path=det_file, old_thresh=8, new_thresh=10, ... chan_thresh=3) Read in 22 detections Left with 17 detections .. Note:: This is a legacy function, and will read detections from all versions. .. Warning:: Only works if thresholding was done by MAD.
def ReadHuntOutputPluginsStates(self, hunt_id, cursor=None):
    """Reads all hunt output plugin states of a given hunt.

    Returns an empty list for a hunt without states.

    Raises:
        db.UnknownHuntError: if no hunt with ``hunt_id`` exists.
    """
    columns = ", ".join(_HUNT_OUTPUT_PLUGINS_STATES_COLUMNS)
    query = ("SELECT {columns} FROM hunt_output_plugins_states "
             "WHERE hunt_id = %s".format(columns=columns))
    rows_returned = cursor.execute(query, [db_utils.HuntIDToInt(hunt_id)])
    if rows_returned > 0:
        states = []
        for row in cursor.fetchall():
            states.append(self._HuntOutputPluginStateFromRow(row))
        return states
    # No states found: distinguish "hunt has no states" from "unknown hunt".
    query = "SELECT hunt_id FROM hunts WHERE hunt_id = %s"
    rows_returned = cursor.execute(query, [db_utils.HuntIDToInt(hunt_id)])
    if rows_returned == 0:
        raise db.UnknownHuntError(hunt_id)
    return []
Reads all hunt output plugins states of a given hunt.
def ConsumeRange(self, start, end):
    """Consume an entire range, or part thereof, from this finger.

    No-op when no ranges remain, or when the current range starts at or
    beyond ``end``. Otherwise the consumed block must start exactly at the
    current range's start and must not extend past its end.

    Args:
        start: beginning of the range to be consumed.
        end: first offset after the consumed range (end + 1).

    Raises:
        RuntimeError: if the consumed block is misaligned with, or longer
            than, the current range.
    """
    old = self.CurrentRange()
    if old is None:
        # No ranges left; nothing to consume.
        return
    if old.start > start:
        if old.start < end:
            raise RuntimeError('Block end too high.')
        # Consumed block lies entirely before the current range.
        return
    if old.start < start:
        raise RuntimeError('Block start too high.')
    if old.end == end:
        # Entire range consumed; drop it.
        del self.ranges[0]
    elif old.end > end:
        # Partially consumed; shrink the current range.
        self.ranges[0] = Range(end, old.end)
    else:
        raise RuntimeError('Block length exceeds range.')
Consumes an entire range, or part thereof. If the finger has no ranges left, or the current range start is higher than the end of the consumed block, nothing happens. Otherwise, the current range is adjusted for the consumed block, or removed, if the entire block is consumed. For things to work, the start of the consumed range and the start of the current finger range must be equal, and the length of the consumed range may not exceed the length of the current range. Args: start: Beginning of range to be consumed. end: First offset after the consumed range (end + 1). Raises: RuntimeError: if the start position of the consumed range is higher than the start of the current range in the finger, or if the consumed range cuts across block boundaries.
def read_response(self):
    """Read an individual response from nsqd.

    Updates ``self.last_response`` with the receive time.

    :returns: tuple of the frame type and the processed data.
    :raises errors.NSQFrameError: for an unrecognised frame type.
    """
    response = self._read_response()
    frame, data = nsq.unpack_response(response)
    self.last_response = time.time()
    if frame not in self._frame_handlers:
        raise errors.NSQFrameError('unknown frame {}'.format(frame))
    # Dispatch the payload to the handler registered for this frame type.
    frame_handler = self._frame_handlers[frame]
    processed_data = frame_handler(data)
    return frame, processed_data
Read an individual response from nsqd. :returns: tuple of the frame type and the processed data.
def _toplevel(cls):
    """Find the top level of the inheritance chain we're in.

    E.g. with C inheriting from B inheriting from A inheriting from
    ClosureModel, C._toplevel() returns A. Returns ``cls`` itself when no
    parent directly subclasses ClosureModel.
    """
    superclasses = (
        list(set(ClosureModel.__subclasses__()) & set(cls._meta.get_parent_list()))
    )
    # NOTE(review): set intersection is unordered, so with more than one
    # matching superclass the pick is arbitrary — confirm that cannot occur.
    return next(iter(superclasses)) if superclasses else cls
Find the top level of the chain we're in. For example, if we have: C inheriting from B inheriting from A inheriting from ClosureModel C._toplevel() will return A.
def findContours(*args, **kwargs):
    """Wrap ``cv2.findContours`` for compatibility between OpenCV 3 and 4.

    OpenCV 3 returns (image, contours, hierarchy) while OpenCV 4 returns
    (contours, hierarchy); both are normalised to the latter.

    Returns:
        contours, hierarchy

    Raises:
        AssertionError: when the cv2 major version is neither 3 nor 4.
    """
    if cv2.__version__.startswith('4'):
        contours, hierarchy = cv2.findContours(*args, **kwargs)
    elif cv2.__version__.startswith('3'):
        # OpenCV 3 also returns the (modified) image; discard it.
        _, contours, hierarchy = cv2.findContours(*args, **kwargs)
    else:
        raise AssertionError(
            'cv2 must be either version 3 or 4 to call this method')
    return contours, hierarchy
Wraps cv2.findContours to maintain compatiblity between versions 3 and 4 Returns: contours, hierarchy
def create_tag(self, version, params):
    """Create a VCS tag for ``version``.

    :param version: tag name / version string
    :param params: extra parameters forwarded to the tag command
    :return: None
    :raises errors.VCSError: when the underlying VCS command exits non-zero.
    """
    cmd = self._command.tag(version, params)
    (code, stdout, stderr) = self._exec(cmd)
    if code:
        # Prefer stderr for the error message; fall back to stdout.
        raise errors.VCSError('Can\'t create VCS tag %s. Process exited with code %d and message: %s' % (
            version, code, stderr or stdout))
Create VCS tag :param version: :param params: :return:
def generate_keypair(keypair_file):
    """Generate a 2048-bit RSA keypair and write it (PEM) to ``keypair_file``.

    Used by helpers that need a keypair when the client has no ``self.key``.
    Uses pycryptodome.

    Parameters
    ==========
    keypair_file: full path to where to save the keypair

    Returns
    =======
    The generated RSA key object.
    """
    from Crypto.PublicKey import RSA
    key = RSA.generate(2048)
    keypair_dir = os.path.dirname(keypair_file)
    # Fix: a bare filename yields dirname == '', and os.makedirs('') raises;
    # exist_ok also removes the check-then-create race.
    if keypair_dir:
        os.makedirs(keypair_dir, exist_ok=True)
    with open(keypair_file, 'wb') as filey:
        filey.write(key.exportKey('PEM'))
    return key
generate_keypair is used by some of the helpers that need a keypair. The function should be used if the client doesn't have the attribute self.key. We generate the key and return it. We use pycryptodome (3.7.2) Parameters ========= keypair_file: fullpath to where to save keypair
def get_cache(self):
    """Populate ``self.pkg_list`` from the disk cache or PyPI.

    With ``no_cache`` set, always queries PyPI directly. Otherwise creates
    the cache directory if needed and reads the cached list when present,
    fetching from PyPI only on a cache miss.
    """
    if self.no_cache:
        self.pkg_list = self.list_packages()
        return
    if not os.path.exists(self.yolk_dir):
        os.mkdir(self.yolk_dir)
    if os.path.exists(self.pkg_cache_file):
        self.pkg_list = self.query_cached_package_list()
    else:
        self.logger.debug("DEBUG: Fetching package list cache from PyPi...")
        self.fetch_pkg_list()
Get a package name list from disk cache or PyPI
def validate_arrangement_version(self):
    """Validate the requested arrangement_version.

    Lets autorebuilds fail early instead of failing later on workers due to
    osbs-client validation. Call after ``self.adjust_build_kwargs``. Warns
    when the version is deprecated; a ``None`` version is a no-op.
    """
    version = self.build_kwargs['arrangement_version']
    if version is not None and version <= 5:
        self.log.warning("arrangement_version <= 5 is deprecated and will be removed"
                         " in release 1.6.38")
Validate that the arrangement_version is supported. This lets autorebuilds fail early; otherwise they may fail later on workers because of osbs-client validation checks. This method should be called after self.adjust_build_kwargs. Shows a warning when the version is deprecated. :raises ValueError: when the version is not supported
def tabular(client, datasets):
    """Format datasets with a tabular output."""
    from renku.models._tabulate import tabulate
    click.echo(
        tabulate(
            datasets,
            # Ordered mapping of attribute -> column header; None keeps the
            # attribute name as the header.
            headers=OrderedDict((
                ('short_id', 'id'),
                ('name', None),
                ('created', None),
                ('authors_csv', 'authors'),
            )),
        )
    )
Format datasets with a tabular output.
def to_text(self, fn:str):
    "Save `self.items` to `fn` in `self.path`."
    with open(self.path/fn, 'w') as out:
        # One item path per line.
        for item_path in self._relative_item_paths():
            out.write(f'{item_path}\n')
Save `self.items` to `fn` in `self.path`.
def label(self):
    """Provide access to the notification label.

    Reads the attribute from the browser chrome context.

    Returns:
        str: The notification label
    """
    with self.selenium.context(self.selenium.CONTEXT_CHROME):
        return self.root.get_attribute("label")
Provide access to the notification label. Returns: str: The notification label
def putenv(key, value):
    """Like `os.putenv` but takes unicode under Windows + Python 2.

    Args:
        key (pathlike): The env var to set
        value (pathlike): The value to set

    Raises:
        ValueError
    """
    key = path2fsn(key)
    value = path2fsn(value)
    if is_win and PY2:
        # os.putenv can't handle unicode on py2/Windows; use the Win32 API.
        try:
            set_windows_env_var(key, value)
        except WindowsError:
            raise ValueError
    else:
        try:
            os.putenv(key, value)
        except OSError:
            raise ValueError
Like `os.putenv` but takes unicode under Windows + Python 2 Args: key (pathlike): The env var to get value (pathlike): The value to set Raises: ValueError
def app_token(vault_client, app_id, user_id):
    """Return a Vault client token obtained via app-id authentication.

    Raises aomi.exceptions.AomiCredentials when the response carries no
    token.
    """
    resp = vault_client.auth_app_id(app_id, user_id)
    try:
        return resp['auth']['client_token']
    except KeyError:
        raise aomi.exceptions.AomiCredentials('invalid apptoken')
Returns a vault token based on the app and user id.
def is_searchable(self):
    """Whether the address holds enough data to search by: either a raw
    string, or a valid country with no state / a valid state.
    """
    if self.raw:
        return self.raw
    return self.is_valid_country and (not self.state or self.is_valid_state)
A bool value that indicates whether the address is a valid address to search by.
def sigmasq_series(htilde, psd=None, low_frequency_cutoff=None,
                   high_frequency_cutoff=None):
    """Return a cumulative sigmasq frequency series.

    The returned series contains, at each frequency bin, the power of the
    input accumulated up to that frequency (zero outside the cutoffs).

    Parameters
    ----------
    htilde : TimeSeries or FrequencySeries
        The input vector.
    psd : {None, FrequencySeries}, optional
        PSD used to weight the accumulated power.
    low_frequency_cutoff : {None, float}, optional
        Frequency to begin accumulating power; None starts at the beginning.
    high_frequency_cutoff : {None, float}, optional
        Frequency to stop; None continues to the end of the input.

    Returns
    -------
    FrequencySeries
        The cumulative sigmasq series.
    """
    htilde = make_frequency_series(htilde)
    N = (len(htilde)-1) * 2
    norm = 4.0 * htilde.delta_f
    kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
                                    high_frequency_cutoff, htilde.delta_f, N)
    sigma_vec = FrequencySeries(zeros(len(htilde), dtype=real_same_precision_as(htilde)),
                                delta_f = htilde.delta_f, copy=False)
    mag = htilde.squared_norm()
    if psd is not None:
        # Whiten by the PSD before accumulating.
        mag /= psd
    sigma_vec[kmin:kmax] = mag[kmin:kmax].cumsum()
    return sigma_vec*norm
Return a cumulative sigmasq frequency series. Return a frequency series containing the accumulated power in the input up to that frequency. Parameters ---------- htilde : TimeSeries or FrequencySeries The input vector psd : {None, FrequencySeries}, optional The psd used to weight the accumulated power. low_frequency_cutoff : {None, float}, optional The frequency to begin accumulating power. If None, start at the beginning of the vector. high_frequency_cutoff : {None, float}, optional The frequency to stop considering accumulated power. If None, continue until the end of the input vector. Returns ------- Frequency Series: FrequencySeries A frequency series containing the cumulative sigmasq.
def load(self, config_template, config_file=None):
    """Read the config file if it exists, else create it from the template.

    :type config_template: str
    :param config_template: The config template file name.
    :type config_file: str
    :param config_file: (Optional) The config file name; defaults to the
        template name.
    :rtype: :class:`configobj.ConfigObj`
    :return: The config information for reading and writing.
    """
    if config_file is None:
        config_file = config_template
    config_path = build_config_file_path(config_file)
    template_path = os.path.join(os.path.dirname(__file__), config_template)
    # Creates the user config only when it does not already exist.
    self._copy_template_to_config(template_path, config_path)
    return self._load_template_or_config(template_path, config_path)
Read the config file if it exists, else read the default config. Creates the user config file if it doesn't exist using the template. :type config_template: str :param config_template: The config template file name. :type config_file: str :param config_file: (Optional) The config file name. If None, the config_file name will be set to the config_template. :rtype: :class:`configobj.ConfigObj` :return: The config information for reading and writing.
def delete_event_view(request, id):
    """Delete event page (admins only).

    Renders a confirmation page unless the request is a POST.

    id: event id
    """
    event = get_object_or_404(Event, id=id)
    if not request.user.has_admin_permission('events'):
        raise exceptions.PermissionDenied
    if request.method == "POST":
        try:
            event.delete()
            messages.success(request, "Successfully deleted event.")
        except Event.DoesNotExist:
            # Already gone; treat as success.
            pass
        return redirect("events")
    else:
        return render(request, "events/delete.html", {"event": event})
Delete event page. You may only delete an event if you were the creator or you are an administrator. Confirmation page if not POST. id: event id
def load(self, env=None):
    """Load the section values of the given environment.

    When ``env`` is not given, the environment is taken from the
    ``RUNNING_MODE_ENVKEY`` environment variable (with a default). Unknown
    environments are logged and return None.

    :param env: environment key to load in a coercive manner
    :type env: string
    :rtype: dict
    """
    self._load()
    e = env or \
        os.environ.get(RUNNING_MODE_ENVKEY, DEFAULT_RUNNING_MODE)
    if e in self.config:
        return self.config[e]
    # Fix: logging.warn is a deprecated alias of logging.warning.
    logging.warning("Environment '%s' was not found.", e)
Load a section values of given environment. If nothing to specified, use environmental variable. If unknown environment was specified, warn it on logger. :param env: environment key to load in a coercive manner :type env: string :rtype: dict
def on_click(self, event):
    """Handle a mouse click: toggle between the 'ip' and 'status' display
    modes, force a refresh, or suppress the update for other buttons.
    """
    clicked = event["button"]
    if clicked == self.button_toggle:
        self.toggled = True
        self.mode = "status" if self.mode == "ip" else "ip"
    elif clicked == self.button_refresh:
        # Force an immediate refresh on the next cycle.
        self.idle_time = 0
    else:
        self.py3.prevent_refresh()
Toggle between display modes 'ip' and 'status'
def categorize(func: Union[Callable, Iterable], category: str) -> None:
    """Categorize a function (or several) under a help-category heading.

    The help command output will group each function under the specified
    category heading.

    :param func: function, or iterable of functions, to categorize
    :param category: category to put it in
    """
    targets = func if isinstance(func, Iterable) else (func,)
    for target in targets:
        setattr(target, HELP_CATEGORY, category)
Categorize a function. The help command output will group this function under the specified category heading :param func: function to categorize :param category: category to put it in
def _sign_operation(op):
    """Obtain a signature for an operation in a ReportRequest.

    The digest covers the consumer id, operation name and (when present)
    the labels dict.

    Args:
        op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`):
            an operation used in a `ReportRequest`

    Returns:
        string: a unique signature for that operation
    """
    md5 = hashlib.md5()
    md5.update(op.consumerId.encode('utf-8'))
    md5.update(b'\x00')  # field separator
    md5.update(op.operationName.encode('utf-8'))
    if op.labels:
        signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))
    return md5.digest()
Obtains a signature for an operation in a ReportRequest. Args: op (:class:`endpoints_management.gen.servicecontrol_v1_messages.Operation`): an operation used in a `ReportRequest` Returns: string: a unique signature for that operation
def keys(self, section=None):
    """Dict-like ``keys()``: keys of the given section (falling back to the
    default section), or of the whole config when neither is set.
    """
    chosen = section or self.section
    if chosen:
        return self.config.get(chosen, {}).keys()
    return self.config.keys()
Provide dict like keys method
def add(self, interval, offset):
    """Record an interval/offset pair.

    Intervals must be added in sorted order: each new interval must overlap
    or lie beyond the last stored one, and each offset must be strictly
    increasing.

    :param interval: interval to add
    :param offset: full virtual offset to add
    :raises ValueError: when added out of order
    """
    start, stop = self.get_start_stop(interval)
    if not self.starts:
        # First entry: open a new [offset, offset, count=1] record.
        self.starts.append(start)
        self.stops.append(stop)
        self.offsets.append([offset, offset, 1])
        return
    latest = self.offsets[-1]
    if start < self.starts[-1] or offset <= latest[1]:
        raise ValueError('intervals and offsets must be added in-order')
    # Extend the open record: bump its end offset and entry count.
    latest[1] = offset
    latest[2] += 1
The added interval must be overlapping or beyond the last stored interval ie. added in sorted order. :param interval: interval to add :param offset: full virtual offset to add :return:
def always_fail(cls, request) -> [
        (200, 'Ok', String),
        (406, 'Not Acceptable', Void)]:
    """Perform an always failing task.

    Yields a 406 response for a random number of iterations (up to
    MAX_LOOP_DURATION).
    """
    task_id = uuid4().hex.upper()[:5]
    log.info('Starting always FAILING task {}'.format(task_id))
    for i in range(randint(0, MAX_LOOP_DURATION)):
        yield Respond(406)
    # NOTE(review): this Respond(200, ...) is constructed but never yielded
    # or returned — a dead statement as written; confirm whether a final
    # `yield` was intended (possibly deliberate, since the task always fails).
    Respond(200, 'Foobar')
Perform an always failing task.