| code | docstring |
|---|---|
def match(self, route):
_resource = trim_resource(self.resource)
self.method = self.method.lower()
resource_match = route.resource_regex.search(_resource)
if resource_match is None:
return None
params = resource_match.groupdict()
querystring = params.pop("querystring", "")
setattr(self, "param", params)
setattr(self, "query", parse_querystring(querystring))
return copy.deepcopy(self)
|
Match input route and return new Message instance
with parsed content
|
def ExpandRecursiveGlobs(cls, path, path_separator):
glob_regex = r'(.*)?{0:s}\*\*(\d{{1,2}})?({0:s})?$'.format(
re.escape(path_separator))
match = re.search(glob_regex, path)
if not match:
return [path]
skip_first = False
if match.group(3):
skip_first = True
if match.group(2):
iterations = int(match.group(2))
else:
iterations = cls._RECURSIVE_GLOB_LIMIT
logger.warning((
'Path "{0:s}" contains fully recursive glob, limiting to 10 '
'levels').format(path))
return cls.AppendPathEntries(
match.group(1), path_separator, iterations, skip_first)
|
Expands recursive like globs present in an artifact path.
If a path ends in '**', with up to two optional digits such as '**10',
the '**' will recursively match all files and zero or more directories
from the specified path. The optional digits indicate the recursion depth.
By default recursion depth is 10 directories.
If the glob is followed by the specified path segment separator, only
directories and subdirectories will be matched.
Args:
path (str): path to be expanded.
path_separator (str): path segment separator.
Returns:
list[str]: String path expanded for each glob.
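A minimal sketch of how the glob regex above dissects a path, assuming '/' as the path segment separator:
>>> import re
>>> glob_regex = r'(.*)?/\*\*(\d{1,2})?(/)?$'
>>> re.search(glob_regex, '/var/log/**2').groups()
('/var/log', '2', None)
>>> re.search(glob_regex, '/var/log/**/').groups()
('/var/log', None, '/')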
|
def _npiter(arr):
for a in np.nditer(arr, flags=["refs_ok"]):
c = a.item()
if c is not None:
yield c
|
Wrapper for iterating over a numpy array
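A small usage sketch with an object array containing None entries:
>>> import numpy as np
>>> arr = np.array([1, None, 3], dtype=object)
>>> list(_npiter(arr))
[1, 3]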
|
def set_hflip(self, val):
self.__horizontal_flip = val
for image in self.images:
image.h_flip = val
|
Flip all the images in the animation list horizontally.
|
def fader(self, value: int):
self._fader = int(value) if 0 < value < 1024 else 0
self.outport.send(mido.Message('control_change', control=0,
value=self._fader >> 7))
self.outport.send(mido.Message('control_change', control=32,
value=self._fader & 0x7F))
|
Move the fader to a new position in the range 0 to 1023.
|
def get_merge_rules(schema=None):
schema = schema or get_release_schema_url(get_tags()[-1])
if isinstance(schema, dict):
deref_schema = jsonref.JsonRef.replace_refs(schema)
else:
deref_schema = _get_merge_rules_from_url_or_path(schema)
return dict(_get_merge_rules(deref_schema['properties']))
|
Returns merge rules as key-value pairs, in which the key is a JSON path as a tuple, and the value is a list of
merge properties whose values are `true`.
|
def download_models(self, uniprot_acc, outdir='', force_rerun=False):
downloaded = []
subset = self.get_models(uniprot_acc)
for entry in subset:
ident = '{}_{}_{}_{}'.format(uniprot_acc, entry['template'], entry['from'], entry['to'])
outfile = op.join(outdir, ident + '.pdb')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = requests.get(entry['url'])
if response.status_code == 404:
log.error('{}: 404 returned, no model available.'.format(ident))
else:
with open(outfile, 'w') as f:
f.write(response.text)
log.debug('{}: downloaded homology model'.format(ident))
downloaded.append(outfile)
else:
downloaded.append(outfile)
return downloaded
|
Download all models available for a UniProt accession number.
Args:
uniprot_acc (str): UniProt ACC/ID
outdir (str): Path to output directory, uses working directory if not set
force_rerun (bool): Force a redownload of the models if they already exist
Returns:
list: Paths to the downloaded models
|
def get_files(self, commit, paths, recursive=False):
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
|
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
|
def truncate_money(money: Money) -> Money:
amount = truncate_to(money.amount, money.currency)
return Money(amount, money.currency)
|
Truncates money amount to the number of decimals corresponding to the currency
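A minimal sketch of the kind of `truncate_to` helper this relies on, assuming a currency-to-decimal-places mapping (names and mapping are illustrative, not the library's actual API):
from decimal import Decimal, ROUND_DOWN

CURRENCY_DECIMALS = {'USD': 2, 'JPY': 0}  # illustrative mapping

def truncate_to(amount, currency):
    # Quantize downwards to the currency's number of decimal places.
    exponent = Decimal(1).scaleb(-CURRENCY_DECIMALS.get(currency, 2))
    return Decimal(amount).quantize(exponent, rounding=ROUND_DOWN)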
|
def remove_selected(self, *args):
self.collapse_nested(self.selected)
self.remove(self.selected)
|
Remove the selected catalog - allow the passing of arbitrary
args so that buttons work. Also remove any nested catalogs.
|
def remove_external_references(self):
for ex_ref_node in self.node.findall('externalReferences'):
self.node.remove(ex_ref_node)
|
Removes any external reference from the role
|
def add_partition(self, spec, location=None):
part_schema = self.partition_schema()
stmt = ddl.AddPartition(
self._qualified_name, spec, part_schema, location=location
)
return self._execute(stmt)
|
Add a new table partition, creating any new directories in HDFS if
necessary.
Partition parameters can be set in a single DDL statement, or you can
use alter_partition to set them after the fact.
Returns
-------
None (for now)
|
def write_command_line(self):
cmd = [" ".join(sys.argv)]
try:
previous = self.attrs["cmd"]
if isinstance(previous, str):
previous = [previous]
elif isinstance(previous, numpy.ndarray):
previous = previous.tolist()
except KeyError:
previous = []
self.attrs["cmd"] = cmd + previous
|
Writes command line to attributes.
The command line is written to the file's ``attrs['cmd']``. If this
attribute already exists in the file (this can happen when resuming
from a checkpoint), ``attrs['cmd']`` will be a list storing the current
command line and all previous command lines.
|
def encode_dataset(dataset, vocabulary):
def encode(features):
return {k: vocabulary.encode_tf(v) for k, v in features.items()}
return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
|
Encode from strings to token ids.
Args:
dataset: a tf.data.Dataset with string values.
vocabulary: a mesh_tensorflow.transformer.Vocabulary
Returns:
a tf.data.Dataset with integer-vector values ending in EOS=1
|
def prune_indices(self, transforms=None):
if self.ndim >= 3:
return self._prune_3d_indices(transforms)
def prune_non_3d_indices(transforms):
row_margin = self._pruning_base(
hs_dims=transforms, axis=self.row_direction_axis
)
row_indices = self._margin_pruned_indices(
row_margin, self._inserted_dim_inds(transforms, 0), 0
)
if row_indices.ndim > 1:
row_indices = row_indices.all(axis=1)
if self.ndim == 1:
return [row_indices]
col_margin = self._pruning_base(
hs_dims=transforms, axis=self._col_direction_axis
)
col_indices = self._margin_pruned_indices(
col_margin, self._inserted_dim_inds(transforms, 1), 1
)
if col_indices.ndim > 1:
col_indices = col_indices.all(axis=0)
return [row_indices, col_indices]
return prune_non_3d_indices(transforms)
|
Return indices of pruned rows and columns as list.
The return value has one of three possible forms:
* a 1-element list of row indices (in case of 1D cube)
* 2-element list of row and col indices (in case of 2D cube)
* n-element list of tuples of 2 elements (if it's 3D cube).
For each case, the 2 elements are the ROW and COL indices of the
elements that need to be pruned. If it's a 3D cube, these indices are
calculated "per slice", that is NOT on the 0th dimension (as the 0th
dimension represents the slices).
|
def comment_stream(reddit_session, subreddit, limit=None, verbosity=1):
get_function = partial(reddit_session.get_comments,
six.text_type(subreddit))
return _stream_generator(get_function, limit, verbosity)
|
Indefinitely yield new comments from the provided subreddit.
Comments are yielded from oldest to newest.
:param reddit_session: The reddit_session to make requests from. In all the
examples this is assigned to the variable ``r``.
:param subreddit: Either a subreddit object, or the name of a
subreddit. Use `all` to get the comment stream for all comments made to
reddit.
:param limit: The maximum number of comments to fetch in a single
iteration. When None, fetch all available comments (reddit limits this
to 1000, or a multiple of 1000 for multi-subreddits). If this number is
too small, comments may be missed.
:param verbosity: A number that controls the amount of output produced to
stderr. <= 0: no output; >= 1: output the total number of comments
processed and provide the short-term number of comments processed per
second; >= 2: output when additional delays are added in order to avoid
subsequent unexpected http errors. >= 3: output debugging information
regarding the comment stream. (Default: 1)
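A minimal usage sketch, assuming ``r`` is an authenticated reddit session as in the examples mentioned above:
>>> for comment in comment_stream(r, 'all', verbosity=0):
...     print(comment.body)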
|
def unlock(self, key):
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._encode_invoke_on_key(map_unlock_codec, key_data, key=key_data, thread_id=thread_id(),
reference_id=self.reference_id_generator.get_and_increment())
|
Releases the lock for the specified key. It never blocks and returns immediately. If the current thread is the
holder of this lock, then the hold count is decremented. If the hold count is zero, then the lock is released.
:param key: (object), the key to lock.
|
def zharkov_pel(v, temp, v0, e0, g, n, z, t_ref=300.,
three_r=3. * constants.R):
v_mol = vol_uc2mol(v, z)
x = v / v0
def f(t):
return three_r * n / 2. * e0 * np.power(x, g) * np.power(t, 2.) * \
g / v_mol * 1.e-9
return f(temp) - f(t_ref)
|
calculate electronic contributions in pressure for the Zharkov equation
the equation can be found in Sokolova and Dorogokupets 2013
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param e0: parameter in K^-1 for the Zharkov equation
:param g: parameter for the Zharkov equation
:param n: number of atoms in a formula unit
:param z: number of formula unit in a unit cell
:param t_ref: reference temperature, 300 K
:param three_r: 3 times gas constant
:return: electronic contribution in GPa
|
def get_plugin_by_model(self, model_class):
self._import_plugins()
assert issubclass(model_class, ContentItem)
try:
name = self._name_for_model[model_class]
except KeyError:
raise PluginNotFound("No plugin found for model '{0}'.".format(model_class.__name__))
return self.plugins[name]
|
Return the corresponding plugin for a given model.
You can also use the :attr:`ContentItem.plugin <fluent_contents.models.ContentItem.plugin>` property directly.
This is the low-level function that supports that feature.
|
def set_logger_level(logger_name, log_level='error'):
logging.getLogger(logger_name).setLevel(
LOG_LEVELS.get(log_level.lower(), logging.ERROR)
)
|
Tweak a specific logger's logging level
|
def set_legend_position(self, legend_position):
if legend_position:
self.legend_position = quote(legend_position)
else:
self.legend_position = None
|
Sets legend position. Default is 'r'.
b - At the bottom of the chart, legend entries in a horizontal row.
bv - At the bottom of the chart, legend entries in a vertical column.
t - At the top of the chart, legend entries in a horizontal row.
tv - At the top of the chart, legend entries in a vertical column.
r - To the right of the chart, legend entries in a vertical column.
l - To the left of the chart, legend entries in a vertical column.
|
def elapsed(self):
if self.end is None:
return (self() - self.start) * self.factor
else:
return (self.end - self.start) * self.factor
|
Return the current elapsed time since start
If the `elapsed` property is called in the context manager scope,
the elapsed time between start and property access is returned.
However, if it is accessed outside of the context manager scope,
it returns the elapsed time between entering and exiting the scope.
The `elapsed` property can thus be accessed at different points within
the context manager scope, to time different parts of the block.
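A usage sketch, assuming the class is used as a context manager (the ``Timer`` name is illustrative):
>>> import time
>>> with Timer() as t:
...     time.sleep(0.1)
...     partial = t.elapsed      # time since entering the scope
...     time.sleep(0.1)
>>> total = t.elapsed            # after exit: time between entering and exiting
>>> partial < total
True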
|
def get(self, field):
if field in ('username', 'uuid', 'app_data'):
return self.data[field]
else:
return self.data.get('app_data', {})[field]
|
Returns the value of a user field.
:param str field:
The name of the user field.
:returns: str -- the value
|
def get_status(self):
if self.status is not None:
return self.status
if self.subsection == "dmdSec":
if self.older is None:
return "original"
else:
return "updated"
if self.subsection in ("techMD", "rightsMD"):
if self.newer is None:
return "current"
else:
return "superseded"
return None
|
Returns the STATUS when serializing.
Calculates based on the subsection type and if it's replacing anything.
:returns: None or the STATUS string.
|
def dequeue(self) -> Tuple[int, TItem]:
if self._len == 0:
raise ValueError('BucketPriorityQueue is empty.')
while self._buckets and not self._buckets[0]:
self._buckets.pop(0)
self._offset += 1
item = self._buckets[0].pop(0)
priority = self._offset
self._len -= 1
if self._drop_set is not None:
self._drop_set.remove((priority, item))
return priority, item
|
Removes and returns an item from the priority queue.
Returns:
A tuple whose first element is the priority of the dequeued item
and whose second element is the dequeued item.
Raises:
ValueError:
The queue is empty.
|
def print_num(num):
out('hex: 0x{0:08x}'.format(num))
out('dec: {0:d}'.format(num))
out('oct: 0o{0:011o}'.format(num))
out('bin: 0b{0:032b}'.format(num))
|
Write a numeric result in various forms
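For example, assuming ``out`` writes each line to stdout, ``print_num(255)`` would print:
hex: 0x000000ff
dec: 255
oct: 0o00000000377
bin: 0b00000000000000000000000011111111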
|
def publish_scene_add(self, scene_id, animation_id, name, color, velocity, config):
self.sequence_number += 1
self.publisher.send_multipart(msgs.MessageBuilder.scene_add(self.sequence_number, scene_id, animation_id, name, color, velocity, config))
return self.sequence_number
|
publish added scene
|
def launch(self, callback_function=None):
self._check_registered()
self._socket_client.receiver_controller.launch_app(
self.supporting_app_id, callback_function=callback_function)
|
If set, launches app related to the controller.
|
def diff(name, **kwargs):
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
ret['changes'] = __salt__['junos.diff'](**kwargs)
return ret
|
Gets the difference between the candidate and the current configuration.
.. code-block:: yaml
get the diff:
junos:
- diff
- id: 10
Parameters:
Optional
* id:
The rollback id value [0-49]. (default = 0)
|
def _names_to_bytes(names):
names = sorted(names)
names_bytes = json.dumps(names).encode('utf8')
return names_bytes
|
Reproducibly converts an iterable of strings to bytes
:param iter[str] names: An iterable of strings
:rtype: bytes
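For example:
>>> _names_to_bytes({'beta', 'alpha'})
b'["alpha", "beta"]'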
|
def set_user_password(name, passwd, **client_args):
if not user_exists(name, **client_args):
log.info('User \'%s\' does not exist', name)
return False
client = _client(**client_args)
client.set_user_password(name, passwd)
return True
|
Change password of a user.
name
Name of the user for whom to set the password.
passwd
New password of the user.
CLI Example:
.. code-block:: bash
salt '*' influxdb.set_user_password <name> <password>
|
def create_knowledge_base(project_id, display_name):
import dialogflow_v2beta1 as dialogflow
client = dialogflow.KnowledgeBasesClient()
project_path = client.project_path(project_id)
knowledge_base = dialogflow.types.KnowledgeBase(
display_name=display_name)
response = client.create_knowledge_base(project_path, knowledge_base)
print('Knowledge Base created:\n')
print('Display Name: {}\n'.format(response.display_name))
print('Knowledge ID: {}\n'.format(response.name))
|
Creates a Knowledge base.
Args:
project_id: The GCP project linked with the agent.
display_name: The display name of the Knowledge base.
|
def get_axis_value_discrete(self, axis):
if self.type != EventType.POINTER_AXIS:
raise AttributeError(_wrong_meth.format(self.type))
return self._libinput.libinput_event_pointer_get_axis_value_discrete(
self._handle, axis)
|
Return the axis value in discrete steps for a given axis event.
How a value translates into a discrete step depends on the source.
If the source is :attr:`~libinput.constant.PointerAxisSource.WHEEL`,
the discrete value correspond to the number of physical mouse wheel
clicks.
If the source is :attr:`~libinput.constant.PointerAxisSource.CONTINUOUS`
or :attr:`~libinput.constant.PointerAxisSource.FINGER`, the discrete
value is always 0.
Args:
axis (~libinput.constant.PointerAxis): The axis whose value to get.
Returns:
float: The discrete value for the given event.
Raises:
AttributeError
|
def set_session(self, headers=None):
if headers is None:
headers = {
'User-Agent':
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3)'
' AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/48.0.2564.116 Safari/537.36')
}
elif not isinstance(headers, dict):
raise TypeError('"headers" must be a dict object')
self.session = Session(self.proxy_pool)
self.session.headers.update(headers)
|
Init session with default or custom headers
Args:
headers: A dict of headers (default None, thus using the default
header to init the session)
|
def event_return(events):
options = _get_options()
index = options['master_event_index']
doc_type = options['master_event_doc_type']
if options['index_date']:
index = '{0}-{1}'.format(index,
datetime.date.today().strftime('%Y.%m.%d'))
_ensure_index(index)
for event in events:
data = {
'tag': event.get('tag', ''),
'data': event.get('data', '')
}
ret = __salt__['elasticsearch.document_create'](index=index,
doc_type=doc_type,
id=uuid.uuid4(),
body=salt.utils.json.dumps(data))
|
Return events to Elasticsearch
Requires that the `event_return` configuration be set in master config.
|
def lemmatize(text, lowercase=True, remove_stopwords=True):
doc = nlp(text)
if lowercase and remove_stopwords:
lemmas = [t.lemma_.lower() for t in doc if not (t.is_stop or t.orth_.lower() in STOPWORDS)]
elif lowercase:
lemmas = [t.lemma_.lower() for t in doc]
elif remove_stopwords:
lemmas = [t.lemma_ for t in doc if not (t.is_stop or t.orth_.lower() in STOPWORDS)]
else:
lemmas = [t.lemma_ for t in doc]
return lemmas
|
Return the lemmas of the tokens in a text.
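A usage sketch, assuming the module loads a spaCy model as ``nlp`` and a ``STOPWORDS`` set at import time:
>>> lemmatize("The cats were running")   # roughly, depending on the model
['cat', 'run']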
|
def stop(self):
if self.original_attributes is not None:
termios.tcsetattr(
self.fd,
termios.TCSADRAIN,
self.original_attributes,
)
|
Restores the terminal attributes back to before setting raw mode.
If the raw terminal was not started, does nothing.
|
def _compile_signature(self, iexec, call_name):
if iexec is not None:
summary = iexec.summary
if isinstance(iexec, Function):
summary = iexec.returns + "| " + iexec.summary
elif isinstance(iexec, Subroutine) and len(iexec.modifiers) > 0:
summary = ", ".join(iexec.modifiers) + " | " + iexec.summary
elif isinstance(iexec, Interface):
summary = iexec.describe()
else:
summary = iexec.summary
if iexec.parent is not None:
summary += " | MODULE: {}".format(iexec.module.name)
else:
summary += " | BUILTIN"
return dict(
params=[p.name for p in iexec.ordered_parameters],
index=0,
call_name=call_name,
description=summary,
)
else:
return []
|
Compiles the signature for the specified executable and returns
as a dictionary.
|
def get(self, key, bucket):
try:
return self._cache[bucket][key]
except (KeyError, TypeError):
return None
|
Get a cached item by key
If the cached item isn't found the return None.
|
def rmse(params1, params2):
assert len(params1) == len(params2)
params1 = np.asarray(params1) - np.mean(params1)
params2 = np.asarray(params2) - np.mean(params2)
sqrt_n = math.sqrt(len(params1))
return np.linalg.norm(params1 - params2, ord=2) / sqrt_n
|
r"""Compute the root-mean-squared error between two models.
Parameters
----------
params1 : array_like
Parameters of the first model.
params2 : array_like
Parameters of the second model.
Returns
-------
error : float
Root-mean-squared error.
|
def by_readings(self, role_names=['', 'Author']):
if not spectator_apps.is_enabled('reading'):
raise ImproperlyConfigured("To use the CreatorManager.by_readings() method, 'spectator.reading' must be in INSTALLED_APPS.")
qs = self.get_queryset()
qs = qs.filter(publication_roles__role_name__in=role_names) \
.exclude(publications__reading__isnull=True) \
.annotate(num_readings=Count('publications__reading')) \
.order_by('-num_readings', 'name_sort')
return qs
|
The Creators who have been most-read, ordered by number of readings.
By default it will only include Creators whose role was left empty,
or is 'Author'.
Each Creator will have a `num_readings` attribute.
|
def get_property_by_hash(self, property_hash: str) -> Optional[Property]:
return self.session.query(Property).filter(Property.sha512 == property_hash).one_or_none()
|
Get a property by its hash if it exists.
|
def activate(self):
response = self._manager.activate(self.ID)
self._update(response["Bounce"])
return response["Message"]
|
Activates the bounce instance and updates it with the latest data.
:return: Activation status.
:rtype: `str`
|
def _create_date_slug(self):
if not self.pk:
d = utc_now()
elif self.published and self.published_on:
d = self.published_on
elif self.updated_on:
d = self.updated_on
self.date_slug = u"{0}/{1}".format(d.strftime("%Y/%m/%d"), self.slug)
|
Prefixes the slug with the ``published_on`` date.
|
def ExecuteRaw(self, position, command):
self.EnsureGdbPosition(position[0], None, None)
return gdb.execute(command, to_string=True)
|
Send a command string to gdb.
|
def add_bases(cls, *bases):
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
|
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
|
def __handle_events(self):
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
self.exit()
|
This is the place to put all event handling.
|
def transform_deprecated_concepts(rdf, cs):
deprecated_concepts = []
for conc in rdf.subjects(RDF.type, SKOSEXT.DeprecatedConcept):
rdf.add((conc, RDF.type, SKOS.Concept))
rdf.add((conc, OWL.deprecated, Literal("true", datatype=XSD.boolean)))
deprecated_concepts.append(conc)
if len(deprecated_concepts) > 0:
ns = cs.replace(localname(cs), '')
dcs = create_concept_scheme(
rdf, ns, 'deprecatedconceptscheme')
logging.debug("creating deprecated concept scheme %s", dcs)
for conc in deprecated_concepts:
rdf.add((conc, SKOS.inScheme, dcs))
|
Transform deprecated concepts so they are in their own concept
scheme.
|
def register(self, mimetype):
def dec(func):
self._reg[mimetype] = func
return func
return dec
|
Register a function to handle a particular mimetype.
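A usage sketch, assuming ``registry`` is an instance of the class that defines this method:
>>> import json
>>> @registry.register('application/json')
... def render_json(obj):
...     return json.dumps(obj)
>>> registry._reg['application/json'] is render_json
True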
|
def hook_outputs(modules:Collection[nn.Module], detach:bool=True, grad:bool=False)->Hooks:
"Return `Hooks` that store activations of all `modules` in `self.stored`"
return Hooks(modules, _hook_inner, detach=detach, is_forward=not grad)
|
Return `Hooks` that store activations of all `modules` in `self.stored`
|
def suspend(self):
vm_state = yield from self._get_vm_state()
if vm_state == "running":
yield from self._control_vm("pause")
self.status = "suspended"
log.info("VirtualBox VM '{name}' [{id}] suspended".format(name=self.name, id=self.id))
else:
log.warn("VirtualBox VM '{name}' [{id}] cannot be suspended, current state: {state}".format(name=self.name,
id=self.id,
state=vm_state))
|
Suspends this VirtualBox VM.
|
def update(self, name=None, email=None, blog=None, company=None,
location=None, hireable=False, bio=None):
user = {'name': name, 'email': email, 'blog': blog,
'company': company, 'location': location,
'hireable': hireable, 'bio': bio}
self._remove_none(user)
url = self._build_url('user')
json = self._json(self._patch(url, data=dumps(user)), 200)
if json:
self._update_(json)
return True
return False
|
If authenticated as this user, update the information with
the information provided in the parameters.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., 'john.smith@example.com'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company:
:param str location:
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
|
def decode_example(self, serialized_example):
data_fields, data_items_to_decoders = self.example_reading_spec()
data_fields["batch_prediction_key"] = tf.FixedLenFeature([1], tf.int64, 0)
if data_items_to_decoders is None:
data_items_to_decoders = {
field: tf.contrib.slim.tfexample_decoder.Tensor(field)
for field in data_fields
}
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(sorted(data_items_to_decoders))
decoded = decoder.decode(serialized_example, items=decode_items)
return dict(zip(decode_items, decoded))
|
Return a dict of Tensors from a serialized tensorflow.Example.
|
def run_command(cmd, *args):
command = ' '.join((cmd,) + args)
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
|
Runs command on the system with given ``args``.
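A usage sketch (assumes ``Popen`` and ``PIPE`` come from ``subprocess``):
>>> rc, out, err = run_command('ls', '-l', '/tmp')
>>> rc
0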
|
def update_statistics(self, activityVectors):
Y = activityVectors
n = self.output_size
A = np.zeros((n, n))
batchSize = len(Y)
for y in Y:
active_units = np.where( y == 1 )[0]
for i in active_units:
for j in active_units:
A[i,j] += 1.
A = A/batchSize
self.average_activity = self.exponential_moving_average(self.average_activity, A, self.smoothing_period)
|
Updates the variable that maintains exponential moving averages of
individual and pairwise unit activity
|
def t_escaped_CARRIAGE_RETURN_CHAR(self, t):
r'\x72'
t.lexer.pop_state()
t.value = unichr(0x000d)
return t
|
r'\x72'
|
def _split_url_string(query_string):
parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
|
Turns a `query_string` into a Python dictionary with unquoted values
|
def eeg_create_mne_events(onsets, conditions=None):
event_id = {}
if conditions is None:
conditions = ["Event"] * len(onsets)
if len(conditions) != len(onsets):
print("NeuroKit Warning: eeg_create_events(): conditions parameter of different length than onsets. Aborting.")
return()
event_names = list(set(conditions))
event_index = list(range(len(event_names)))
for i in enumerate(event_names):
conditions = [event_index[i[0]] if x==i[1] else x for x in conditions]
event_id[i[1]] = event_index[i[0]]
events = np.array([onsets, [0]*len(onsets), conditions]).T
return(events, event_id)
|
Create MNE compatible events.
Parameters
----------
onsets : list or array
Events onsets.
conditions : list
A list of equal length containing the stimuli types/conditions.
Returns
----------
(events, event_id) : tuple
MNE-formatted events and a dictionary with event's names.
Example
----------
>>> import neurokit as nk
>>> events, event_id = nk.eeg_create_mne_events(events_onset, conditions)
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
|
def delete(self, table_id):
from google.api_core.exceptions import NotFound
if not self.exists(table_id):
raise NotFoundException("Table does not exist")
table_ref = self.client.dataset(self.dataset_id).table(table_id)
try:
self.client.delete_table(table_ref)
except NotFound:
pass
except self.http_error as ex:
self.process_http_error(ex)
|
Delete a table in Google BigQuery
Parameters
----------
table_id : str
Name of table to be deleted
|
def _list_queues():
queue_dir = __opts__['sqlite_queue_dir']
files = os.path.join(queue_dir, '*.db')
paths = glob.glob(files)
queues = [os.path.splitext(os.path.basename(item))[0] for item in paths]
return queues
|
Return a list of sqlite databases in the queue_dir
|
def disassemble(self, data, start_address=0):
return _opcodes.disassemble(self._ptr, data, start_address)
|
Return a list containing the virtual memory address, instruction length
and disassembly code for the given binary buffer.
|
def get(self):
tasks = self._get_avaliable_tasks()
if not tasks:
return None
name, data = tasks[0]
self._client.kv.delete(name)
return data
|
Get a task from the queue.
|
def open(self):
if self._is_open:
raise HIDException("Failed to open device: HIDDevice already open")
path = self.path.encode('utf-8')
dev = hidapi.hid_open_path(path)
if dev:
self._is_open = True
self._device = dev
else:
raise HIDException("Failed to open device")
|
Open the HID device for reading and writing.
|
def _normalize_compare_config(self, diff):
ignore_strings = [
"Contextual Config Diffs",
"No changes were found",
"ntp clock-period",
]
if self.auto_file_prompt:
ignore_strings.append("file prompt quiet")
new_list = []
for line in diff.splitlines():
for ignore in ignore_strings:
if ignore in line:
break
else:
new_list.append(line)
return "\n".join(new_list)
|
Filter out strings that should not show up in the diff.
|
def find_peakset(dataset, basecolumn=-1, method='', where=None):
peakset = []
where_i = None
for data in dataset:
base = data[basecolumn]
base = maidenhair.statistics.average(base)
if where:
adata = [maidenhair.statistics.average(x) for x in data]
where_i = np.where(where(adata))
base = base[where_i]
index = getattr(np, method, np.argmax)(base)
for a, axis in enumerate(data):
if len(peakset) <= a:
peakset.append([])
if where_i:
axis = axis[where_i]
peakset[a].append(axis[index])
peakset = np.array(peakset)
return peakset
|
Find peakset from the dataset
Parameters
-----------
dataset : list
A list of data
basecolumn : int
An index of column for finding peaks
method : str
A method name of numpy for finding peaks
where : function
A function which receives ``data`` and returns a numpy indexing list
Returns
-------
list
A list of peaks of each axis (list)
|
def get_text_contents(self):
contents = self.get_contents()
if contents[:len(codecs.BOM_UTF8)] == codecs.BOM_UTF8:
return contents[len(codecs.BOM_UTF8):].decode('utf-8')
if contents[:len(codecs.BOM_UTF16_LE)] == codecs.BOM_UTF16_LE:
return contents[len(codecs.BOM_UTF16_LE):].decode('utf-16-le')
if contents[:len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE:
return contents[len(codecs.BOM_UTF16_BE):].decode('utf-16-be')
try:
return contents.decode('utf-8')
except UnicodeDecodeError as e:
try:
return contents.decode('latin-1')
except UnicodeDecodeError as e:
return contents.decode('utf-8', errors='backslashreplace')
|
This attempts to figure out what the encoding of the text is
based upon the BOM bytes, and then decodes the contents so that
it's a valid python string.
|
def get_xml(html, content_tag='ekb', fail_if_empty=False):
cont = re.findall(r'<%(tag)s(.*?)>(.*?)</%(tag)s>' % {'tag': content_tag},
html, re.MULTILINE | re.DOTALL)
if cont:
events_terms = ''.join([l.strip() for l in cont[0][1].splitlines()])
if 'xmlns' in cont[0][0]:
meta = ' '.join([l.strip() for l in cont[0][0].splitlines()])
else:
meta = ''
else:
events_terms = ''
meta = ''
if fail_if_empty:
assert events_terms != '',\
"Got empty string for events content from html:\n%s" % html
header = ('<?xml version="1.0" encoding="utf-8" standalone="yes"?><%s%s>'
% (content_tag, meta))
footer = '</%s>' % content_tag
return header + events_terms.replace('\n', '') + footer
|
Extract the content XML from the HTML output of the TRIPS web service.
Parameters
----------
html : str
The HTML output from the TRIPS web service.
content_tag : str
The xml tag used to label the content. Default is 'ekb'.
fail_if_empty : bool
If True, and if the xml content found is an empty string, raise an
exception. Default is False.
Returns
-------
The extraction knowledge base (e.g. EKB) XML that contains the event and
term extractions.
|
def validate(self):
if (self.scheme is None or self.scheme != '') \
and (self.host is None or self.host == ''):
return False
return True
|
Validates the URL object. The URL object is invalid if it does not represent an absolute URL.
Returns True or False based on this.
|
def startElement(self, name, attrs):
self.stack.append((self.current, self.chardata))
self.current = {}
self.chardata = []
|
Initialize new node and store current node into stack.
|
def get_account_history(self, account_id, **kwargs):
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
|
List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
|
def reorient_z(structure):
struct = structure.copy()
sop = get_rot(struct)
struct.apply_operation(sop)
return struct
|
reorients a structure such that the z axis is concurrent with the
normal to the A-B plane
|
def compute_node_positions(self):
xs = []
ys = []
self.locs = dict()
for node in self.nodes:
x = self.graph.node[node][self.node_lon]
y = self.graph.node[node][self.node_lat]
xs.append(x)
ys.append(y)
self.locs[node] = (x, y)
self.node_coords = {"x": xs, "y": ys}
|
Extracts the node positions based on the specified longitude and
latitude keyword arguments.
|
def list(self, argv):
def read(index):
print(index.name)
for key in sorted(index.content.keys()):
value = index.content[key]
print(" %s: %s" % (key, value))
if len(argv) == 0:
for index in self.service.indexes:
count = index['totalEventCount']
print("%s (%s)" % (index.name, count))
else:
self.foreach(argv, read)
|
List available indexes if no names provided, otherwise list the
properties of the named indexes.
|
def invoked(self, ctx):
if not ctx.ansi.is_enabled:
print("You need color support to use this demo")
else:
print(ctx.ansi.cmd('erase_display'))
self._demo_fg_color(ctx)
self._demo_bg_color(ctx)
self._demo_bg_indexed(ctx)
self._demo_rgb(ctx)
self._demo_style(ctx)
|
Method called when the command is invoked.
|
def _regexSearchKeyValueCombo(policy_data, policy_regpath, policy_regkey):
if policy_data:
specialValueRegex = salt.utils.stringutils.to_bytes(r'(\*\*Del\.|\*\*DelVals\.){0,1}')
_thisSearch = b''.join([salt.utils.stringutils.to_bytes(r'\['),
re.escape(policy_regpath),
b'\00;',
specialValueRegex,
re.escape(policy_regkey),
b'\00;'])
match = re.search(_thisSearch, policy_data, re.IGNORECASE)
if match:
return policy_data[match.start():(policy_data.index(b']', match.end())) + 2]
return None
|
helper function to do a search of Policy data from a registry.pol file
for a policy_regpath and policy_regkey combo
|
def change_node_subscriptions(self, jid, node, subscriptions_to_set):
iq = aioxmpp.stanza.IQ(
type_=aioxmpp.structs.IQType.SET,
to=jid,
payload=pubsub_xso.OwnerRequest(
pubsub_xso.OwnerSubscriptions(
node,
subscriptions=[
pubsub_xso.OwnerSubscription(
jid,
subscription
)
for jid, subscription in subscriptions_to_set
]
)
)
)
yield from self.client.send(iq)
|
Update the subscriptions at a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the node to modify
:type node: :class:`str`
:param subscriptions_to_set: The subscriptions to set at the node.
:type subscriptions_to_set: :class:`~collections.abc.Iterable` of
tuples consisting of the JID to (un)subscribe and the subscription
level to use.
:raises aioxmpp.errors.XMPPError: as returned by the service
`subscriptions_to_set` must be an iterable of pairs (`jid`,
`subscription`), where the `jid` indicates the JID for which the
`subscription` is to be set.
|
def _get_geocoding(self, key, location):
url = self._location_query_base % quote_plus(key)
if self.api_key:
url += "&key=%s" % self.api_key
data = self._read_from_url(url)
response = json.loads(data)
if response["status"] == "OK":
formatted_address = response["results"][0]["formatted_address"]
pos = formatted_address.find(",")
if pos == -1:
location.name = formatted_address
location.region = ""
else:
location.name = formatted_address[:pos].strip()
location.region = formatted_address[pos + 1 :].strip()
geo_location = response["results"][0]["geometry"]["location"]
location.latitude = float(geo_location["lat"])
location.longitude = float(geo_location["lng"])
else:
raise AstralError("GoogleGeocoder: Unable to locate %s. Server Response=%s" %
(key, response["status"]))
|
Lookup the Google geocoding API information for `key`
|
def _get_external_workers(worker):
worker_that_blocked_task = collections.defaultdict(set)
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_worker_id == worker._id or not other_task:
continue
worker_that_blocked_task[other_worker_id].add(other_task)
return worker_that_blocked_task
|
This returns a dict with a set of tasks for all of the other workers
|
def process_request_thread(self, request, client_address):
from ..blockstackd import get_gc_thread
try:
self.finish_request(request, client_address)
except Exception:
self.handle_error(request, client_address)
finally:
self.shutdown_request(request)
shutdown_thread = False
with self._thread_guard:
if threading.current_thread().ident in self._threads:
del self._threads[threading.current_thread().ident]
shutdown_thread = True
if BLOCKSTACK_TEST:
log.debug('{} active threads (removed {})'.format(len(self._threads), threading.current_thread().ident))
if shutdown_thread:
gc_thread = get_gc_thread()
if gc_thread:
gc_thread.gc_event()
|
Same as in BaseServer but as a thread.
In addition, exception handling is done here.
|
def get_events_with_error_code(event_number, event_status, select_mask=0b1111111111111111, condition=0b0000000000000000):
logging.debug("Calculate events with certain error code")
return np.unique(event_number[event_status & select_mask == condition])
|
Selects the events with a certain error code.
Parameters
----------
event_number : numpy.array
event_status : numpy.array
select_mask : int
The mask that selects the event error code to check.
condition : int
The value the selected event error code should have.
Returns
-------
numpy.array
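Example (a small sketch with made-up data):
>>> import numpy as np
>>> event_number = np.array([0, 1, 1, 2, 3])
>>> event_status = np.array([0b0, 0b10, 0b0, 0b10, 0b0])
>>> get_events_with_error_code(event_number, event_status,
...                            select_mask=0b10, condition=0b10)
array([1, 2])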
|
def submit_statsd_measurements(self):
for key, value in self.measurement.counters.items():
self.statsd.incr(key, value)
for key, values in self.measurement.durations.items():
for value in values:
self.statsd.add_timing(key, value)
for key, value in self.measurement.values.items():
self.statsd.set_gauge(key, value)
for key, value in self.measurement.tags.items():
if isinstance(value, bool):
if value:
self.statsd.incr(key)
elif isinstance(value, str):
if value:
self.statsd.incr('{}.{}'.format(key, value))
elif isinstance(value, int):
self.statsd.incr(key, value)
else:
LOGGER.warning('The %s value type of %s is unsupported',
key, type(value))
|
Submit a measurement for a message to statsd as individual items.
|
def _synchronized(meth):
@functools.wraps(meth)
def wrapper(self, *args, **kwargs):
with self._lock:
return meth(self, *args, **kwargs)
return wrapper
|
Call method while holding a lock.
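A usage sketch, assuming the decorated class provides a ``_lock`` attribute:
import threading

class Counter:
    def __init__(self):
        self._lock = threading.Lock()
        self.value = 0

    @_synchronized
    def increment(self):
        self.value += 1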
|
def create_kernel_spec(self, is_cython=False,
is_pylab=False, is_sympy=False):
CONF.set('main', 'spyder_pythonpath',
self.main.get_spyder_pythonpath())
return SpyderKernelSpec(is_cython=is_cython,
is_pylab=is_pylab,
is_sympy=is_sympy)
|
Create a kernel spec for our own kernels
|
def send_data(self, **kwargs):
put_url = None
if 'put_url' in kwargs:
put_url = kwargs['put_url']
else:
put_url = self.put_upload_url
if 'data' not in kwargs:
raise AttributeError("'data' parameter is required")
if not put_url:
raise AttributeError("'put_url' cannot be None")
if not isinstance(kwargs['data'], str):
raise TypeError("'data' parameter must be of type 'str'")
response = GettRequest().put(put_url, kwargs['data'])
if response.http_status == 200:
return True
|
This method transmits data to the Gett service.
Input:
* ``put_url`` A PUT url to use when transmitting the data (required)
* ``data`` A byte stream (required)
Output:
* ``True``
Example::
if file.send_data(put_url=file.upload_url, data=open("example.txt", "rb").read()):
print "Your file has been uploaded."
|
def pydeps(**args):
_args = args if args else cli.parse_args(sys.argv[1:])
inp = target.Target(_args['fname'])
log.debug("Target: %r", inp)
if _args.get('output'):
_args['output'] = os.path.abspath(_args['output'])
else:
_args['output'] = os.path.join(
inp.calling_dir,
inp.modpath.replace('.', '_') + '.' + _args.get('format', 'svg')
)
with inp.chdir_work():
_args['fname'] = inp.fname
_args['isdir'] = inp.is_dir
if _args.get('externals'):
del _args['fname']
exts = externals(inp, **_args)
print(json.dumps(exts, indent=4))
return exts
else:
return _pydeps(inp, **_args)
|
Entry point for the ``pydeps`` command.
This function should do all the initial parameter and environment
munging before calling ``_pydeps`` (so that function has a clean
execution path).
|
def parse_objective_coefficient(entry):
for parameter in entry.kinetic_law_reaction_parameters:
pid, name, value, units = parameter
if (pid == 'OBJECTIVE_COEFFICIENT' or
name == 'OBJECTIVE_COEFFICIENT'):
return value
return None
|
Return objective value for reaction entry.
Detect objectives that are specified using the non-standardized
kinetic law parameters which are used by many pre-FBC SBML models. The
objective coefficient is returned for the given reaction, or None if
undefined.
Args:
entry: :class:`SBMLReactionEntry`.
|
def collect_filepaths(self, directories):
plugin_filepaths = set()
directories = util.to_absolute_paths(directories)
for directory in directories:
filepaths = util.get_filepaths_from_dir(directory)
filepaths = self._filter_filepaths(filepaths)
plugin_filepaths.update(set(filepaths))
plugin_filepaths = self._remove_blacklisted(plugin_filepaths)
return plugin_filepaths
|
Collects and returns every filepath from each directory in
`directories` that is filtered through the `file_filters`.
If no `file_filters` are present, passes every file in directory
as a result.
Always returns a `set` object
`directories` can be a single object or an iterable. Recommend using
absolute paths.
|
def write_gif(dataset, filename, fps=10):
try:
check_dataset(dataset)
except ValueError as e:
dataset = try_fix_dataset(dataset)
check_dataset(dataset)
delay_time = 100 // int(fps)
def encode(d):
four_d = isinstance(dataset, numpy.ndarray) and len(dataset.shape) == 4
if four_d or not isinstance(dataset, numpy.ndarray):
return _make_animated_gif(d, delay_time=delay_time)
else:
return _make_gif(d)
with open(filename, 'wb') as outfile:
outfile.write(HEADER)
for block in encode(dataset):
outfile.write(block)
outfile.write(TRAILER)
|
Write a NumPy array to GIF 89a format.
Or write a list of NumPy arrays to an animation (GIF 89a format).
- Positional arguments::
:param dataset: A NumPy array or list of arrays with shape
rgb x rows x cols and integer values in [0, 255].
:param filename: The output file that will contain the GIF image.
:param fps: The (integer) frames/second of the animation (default 10).
:type dataset: a NumPy array or list of NumPy arrays.
:return: None
- Example: a minimal array, with one red pixel, would look like this::
import numpy as np
one_red_pixel = np.array([[[255]], [[0]], [[0]]])
write_gif(one_red_pixel, 'red_pixel.gif')
:raises: ValueError
|
def iterkeys(self, key_type=None, return_all_keys=False):
if(key_type is not None):
the_key = str(key_type)
if the_key in self.__dict__:
for key in self.__dict__[the_key].keys():
if return_all_keys:
yield self.__dict__[the_key][key]
else:
yield key
else:
for keys in self.items_dict.keys():
yield keys
|
Returns an iterator over the dictionary's keys.
@param key_type if specified, iterator for a dictionary of this type will be used.
Otherwise (if not specified) tuples containing all (multiple) keys
for this dictionary will be generated.
@param return_all_keys if set to True - tuple of keys is returned instead of a key of this type.
|
def _validate_allowed_settings(self, application_id, application_config, allowed_settings):
for setting_key in application_config.keys():
if setting_key not in allowed_settings:
raise ImproperlyConfigured(
"Platform {}, app {} does not support the setting: {}.".format(
application_config["PLATFORM"], application_id, setting_key
)
)
|
Confirm only allowed settings are present.
|
def ifilter(self, recursive=True, matches=None, flags=FLAGS,
forcetype=None):
gen = self._indexed_ifilter(recursive, matches, flags, forcetype)
return (node for i, node in gen)
|
Iterate over nodes in our list matching certain conditions.
If *forcetype* is given, only nodes that are instances of this type (or
tuple of types) are yielded. Setting *recursive* to ``True`` will
iterate over all children and their descendants. ``RECURSE_OTHERS``
will only iterate over children that are not the instances of
*forcetype*. ``False`` will only iterate over immediate children.
``RECURSE_OTHERS`` can be used to iterate over all un-nested templates,
even if they are inside of HTML tags, like so:
>>> code = mwparserfromhell.parse("{{foo}}<b>{{foo|{{bar}}}}</b>")
>>> code.filter_templates(code.RECURSE_OTHERS)
["{{foo}}", "{{foo|{{bar}}}}"]
*matches* can be used to further restrict the nodes, either as a
function (taking a single :class:`.Node` and returning a boolean) or a
regular expression (matched against the node's string representation
with :func:`re.search`). If *matches* is a regex, the flags passed to
:func:`re.search` are :const:`re.IGNORECASE`, :const:`re.DOTALL`, and
:const:`re.UNICODE`, but custom flags can be specified by passing
*flags*.
|
def parse(binary, **params):
encoding = params.get('charset', 'UTF-8')
return json.loads(binary, encoding=encoding)
|
Turns a JSON structure into a python object.
|
def client_getter():
def wrapper(f):
@wraps(f)
def decorated(*args, **kwargs):
if 'client_id' not in kwargs:
abort(500)
client = Client.query.filter_by(
client_id=kwargs.pop('client_id'),
user_id=current_user.get_id(),
).first()
if client is None:
abort(404)
return f(client, *args, **kwargs)
return decorated
return wrapper
|
Decorator to retrieve Client object and check user permission.
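A usage sketch of a view protected by the decorator (blueprint, route, and attribute names are illustrative):
@blueprint.route('/applications/<string:client_id>/')
@client_getter()
def client_view(client):
    # ``client_id`` is consumed by the decorator; the Client instance is passed instead.
    return client.name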
|
def get_preds(self, ds_type:DatasetType=DatasetType.Valid, with_loss:bool=False, n_batch:Optional[int]=None, pbar:Optional[PBar]=None,
ordered:bool=False) -> List[Tensor]:
"Return predictions and targets on the valid, train, or test set, depending on `ds_type`."
self.model.reset()
if ordered: np.random.seed(42)
preds = super().get_preds(ds_type=ds_type, with_loss=with_loss, n_batch=n_batch, pbar=pbar)
if ordered and hasattr(self.dl(ds_type), 'sampler'):
np.random.seed(42)
sampler = [i for i in self.dl(ds_type).sampler]
reverse_sampler = np.argsort(sampler)
preds = [p[reverse_sampler] for p in preds]
return(preds)
|
Return predictions and targets on the valid, train, or test set, depending on `ds_type`.
|
def diff_dictionaries(old_dict, new_dict):
old_set = set(old_dict)
new_set = set(new_dict)
added_set = new_set - old_set
removed_set = old_set - new_set
common_set = old_set & new_set
changes = 0
output = []
for key in added_set:
changes += 1
output.append(DictValue(key, None, new_dict[key]))
for key in removed_set:
changes += 1
output.append(DictValue(key, old_dict[key], None))
for key in common_set:
output.append(DictValue(key, old_dict[key], new_dict[key]))
if str(old_dict[key]) != str(new_dict[key]):
changes += 1
output.sort(key=attrgetter("key"))
return [changes, output]
|
Diffs two single dimension dictionaries
Returns the number of changes and an unordered list
expressing the common entries and changes.
Args:
old_dict(dict): old dictionary
new_dict(dict): new dictionary
Returns: list()
int: number of changed records
list: [DictValue]
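Example:
>>> changes, output = diff_dictionaries({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
>>> changes
3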
|
def create_branch(self, branch_name):
self.create()
self.ensure_working_tree()
logger.info("Creating branch '%s' in %s ..", branch_name, format_path(self.local))
self.context.execute(*self.get_create_branch_command(branch_name))
|
Create a new branch based on the working tree's revision.
:param branch_name: The name of the branch to create (a string).
This method automatically checks out the new branch, but note that the
new branch may not actually exist until a commit has been made on the
branch.
|
def read_local_config(cfg):
try:
if os.path.exists(cfg):
config = import_file_object(cfg)
return config
else:
logger.warning(
'%s: local config file (%s) not found, cannot be read' %
(inspect.stack()[0][3], str(cfg)))
except IOError as e:
logger.warning(
'import_file_object: %s error opening %s' % (str(e), str(cfg))
)
return {}
|
Parses local config file for override values
Args:
:cfg (str): filename of local config file
Returns:
dict object of values contained in local config file
|
def get_user_details(self, response):
email = response['email']
username = response.get('nickname', email).split('@', 1)[0]
return {'username': username,
'email': email,
'fullname': '',
'first_name': '',
'last_name': ''}
|
Return user details from OAuth Profile Google App Engine App
|
def decrypt(self,ciphertext,n=''):
self.ed = 'd'
if self.mode == MODE_XTS:
return self.chain.update(ciphertext,'d',n)
else:
return self.chain.update(ciphertext,'d')
|
Decrypt some ciphertext
ciphertext = a string of binary data
n = the 'tweak' value when the chaining mode is XTS
The decrypt function will decrypt the supplied ciphertext.
The behavior varies slightly depending on the chaining mode.
ECB, CBC:
---------
When the supplied ciphertext is not a multiple of the blocksize
of the cipher, then the remaining ciphertext will be cached.
The next time the decrypt function is called with some ciphertext,
the new ciphertext will be concatenated to the cache and then
cache+ciphertext will be decrypted.
CFB, OFB, CTR:
--------------
When the chaining mode allows the cipher to act as a stream cipher,
the decrypt function will always decrypt all of the supplied
ciphertext immediately. No cache will be kept.
XTS:
----
Because the handling of the last two blocks is linked,
it needs the whole block of ciphertext to be supplied at once.
Every decrypt function called on a XTS cipher will output
a decrypted block based on the current supplied ciphertext block.
CMAC:
-----
Mode not supported for decryption as this does not make sense.
|
def _session_key(self):
if not hasattr(self, "_cached_session_key"):
session_id_bytes = self.get_secure_cookie("session_id")
session_id = None
if session_id_bytes:
try:
session_id = session_id_bytes.decode('utf-8')
except:
pass
if not session_id:
session_id = oz.redis_sessions.random_hex(20)
session_time = oz.settings["session_time"]
kwargs = dict(
name="session_id",
value=session_id.encode('utf-8'),
domain=oz.settings.get("cookie_domain"),
httponly=True,
)
if session_time:
kwargs["expires_days"] = round(session_time/60/60/24)
self.set_secure_cookie(**kwargs)
password_salt = oz.settings["session_salt"]
self._cached_session_key = "session:%s:v4" % oz.redis_sessions.password_hash(session_id, password_salt=password_salt)
return self._cached_session_key
|
Gets the redis key for a session
|