code | docstring
|---|---|
def append_row(self, values, value_input_option='RAW'):
    params = {
        'valueInputOption': value_input_option
    }
    body = {
        'values': [values]
    }
    return self.spreadsheet.values_append(self.title, params, body)
|
Adds a row to the worksheet and populates it with values.
Widens the worksheet if there are more values than columns.
:param values: List of values for the new row.
:param value_input_option: (optional) Determines how input data should
be interpreted. See `ValueInputOption`_ in
the Sheets API.
:type value_input_option: str
.. _ValueInputOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueInputOption
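An illustrative call (``ws`` here is a hypothetical worksheet instance):
>>> ws.append_row(['Alice', 42], value_input_option='USER_ENTERED')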
|
def redirect_territory(level, code):
    territory = GeoZone.objects.valid_at(datetime.now()).filter(
        code=code, level='fr:{level}'.format(level=level)).first()
    return redirect(url_for('territories.territory', territory=territory))
|
Implicit redirect given the INSEE code.
Optimistically redirect to the latest valid/known INSEE code.
|
def assertTraceDoesNotContain(response, message):
    if not hasattr(response, "verify_trace"):
        raise AttributeError("Response object does not contain verify_trace method!")
    if response.verify_trace(message, False):
        raise TestStepFail('Assert: Message(s) "%s" in response' % message)
|
Raise TestStepFail if response.verify_trace finds the message in the response traces.
:param response: Response. Must contain method verify_trace
:param message: Message to look for
:return: Nothing
:raises: AttributeError if response does not contain verify_trace method.
TestStepFail if verify_trace returns True.
|
def SensorMetatagsPost(self, sensor_id, metatags, namespace = None):
    ns = "default" if namespace is None else namespace
    if self.__SenseApiCall__("/sensors/{0}/metatags.json?namespace={1}".format(sensor_id, ns), "POST", parameters = metatags):
        return True
    else:
        self.__error__ = "api call unsuccessful"
        return False
|
Attach metatags to a sensor for a specific namespace
@param sensor_id (int) - Id of the sensor to attach metatags to
@param namespace (string) - Namespace for which to attach metatags
@param metatags (dictionary) - Metatags to attach to the sensor
@return (bool) - Boolean indicating whether SensorMetatagsPost was successful
|
def interp_like(self, other, method='linear', assume_sorted=False,
                kwargs={}):
    if self.dtype.kind not in 'uifc':
        raise TypeError('interp only works for a numeric type array. '
                        'Given {}.'.format(self.dtype))
    ds = self._to_temp_dataset().interp_like(
        other, method=method, kwargs=kwargs, assume_sorted=assume_sorted)
    return self._from_temp_dataset(ds)
|
Interpolate this object onto the coordinates of another object,
filling out of range values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to a 1d array-like, which provides coordinates upon
which to index the variables in this dataset.
method: string, optional.
{'linear', 'nearest'} for multidimensional array,
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
for 1-dimensional array. 'linear' is used by default.
assume_sorted: boolean, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs: dictionary, optional
Additional keyword arguments passed to scipy's interpolator.
Returns
-------
interpolated: xr.DataArray
Another dataarray by interpolating this dataarray's data along the
coordinates of the other object.
Notes
-----
scipy is required.
If the dataarray has object-type coordinates, reindex is used for these
coordinates instead of the interpolation.
See Also
--------
DataArray.interp
DataArray.reindex_like
|
def _translate_language_name(self, language_name):
    languages = self.languages()
    language_id = None
    for ideone_index, ideone_language in languages.items():
        if ideone_language.lower() == language_name.lower():
            return ideone_index
    simple_languages = dict((k, v.split('(')[0].strip())
                            for (k, v) in languages.items())
    for ideone_index, simple_name in simple_languages.items():
        if simple_name.lower() == language_name.lower():
            return ideone_index
    language_choices = languages.values() + simple_languages.values()
    similar_choices = difflib.get_close_matches(language_name,
                                                language_choices,
                                                n=3,
                                                cutoff=0.3)
    similar_choices_string = ", ".join(["'" + s + "'"
                                        for s in similar_choices])
    error_string = ("Couldn't match '%s' to an Ideone accepted language.\n"
                    "Did you mean one of the following: %s")
    raise IdeoneError(error_string % (language_name, similar_choices_string))
|
Translate a human readable language name into its Ideone
integer representation.
Keyword Arguments
-----------------
* language_name: a string of the language (e.g. "c++")
Returns
-------
An integer representation of the language.
Notes
-----
We use a local cache of languages if available, else we grab
the list of languages from Ideone. We test for a string match
by comparing prefixes because Ideone includes the language
compiler name and version number. Both strings are converted
to lower case before the comparison.
Examples
--------
>>> ideone_object = Ideone('username', 'password')
>>> ideone_object._translate_language_name('ada')
7
|
def _merge_filters(self) -> None:
    for opts in (["-filter:a", "-af"], ["-filter:v", "-vf"]):
        filter_list = []
        new_argv = []
        cmd_iter = iter(self._argv)
        for element in cmd_iter:
            if element in opts:
                filter_list.insert(0, next(cmd_iter))
            else:
                new_argv.append(element)
        if filter_list:
            new_argv.extend([opts[0], ",".join(filter_list)])
        self._argv = new_argv.copy()
|
Merge all filter options in the command line into a single filter argument per stream type.
|
def get_changes(self, changers, in_hierarchy=False, resources=None,
                task_handle=taskhandle.NullTaskHandle()):
    function_changer = _FunctionChangers(self.pyname.get_object(),
                                         self._definfo(), changers)
    return self._change_calls(function_changer, in_hierarchy,
                              resources, task_handle)
|
Get changes caused by this refactoring
`changers` is a list of `_ArgumentChanger`\s. If `in_hierarchy`
is `True` the changers are applied to all matching methods in
the class hierarchy.
`resources` can be a list of `rope.base.resource.File`\s that
should be searched for occurrences; if `None` all python files
in the project are searched.
|
def cree_ws_lecture(self, champs_ligne):
    for c in champs_ligne:
        label = ASSOCIATION[c][0]
        w = ASSOCIATION[c][3](self.acces[c], False)
        w.setObjectName("champ-lecture-seule-details")
        self.widgets[c] = (w, label)
|
Alternative way to create read-only widgets. Their values should be set afterwards.
|
def get_sql(self):
    test_method = [
        self.is_time,
        self.is_date,
        self.is_datetime,
        self.is_decimal,
        self.is_year,
        self.is_tinyint,
        self.is_smallint,
        self.is_mediumint,
        self.is_int,
        self.is_bigint,
        self.is_tinytext,
        self.is_varchar,
        self.is_mediumtext,
        self.is_longtext,
    ]
    for method in test_method:
        if method():
            return self.sql
|
Retrieve the data type for a data record.
|
def align_after(self, offset):
    f = self.reader
    if offset <= 0:
        f.seek(0)
        self._block_count = 0
        self._read_header()
        return
    sm = self.sync_marker
    sml = len(sm)
    pos = offset
    while pos < self.file_length - sml:
        f.seek(pos)
        data = f.read(self.FORWARD_WINDOW_SIZE)
        sync_offset = data.find(sm)
        if sync_offset > -1:
            f.seek(pos + sync_offset)
            self._block_count = 0
            return
        pos += len(data)
|
Search for a sync point after offset and align just after that.
|
def render_form(form, **kwargs):
    renderer_cls = get_form_renderer(**kwargs)
    return renderer_cls(form, **kwargs).render()
|
Render a form to a Bootstrap layout
|
def depart_heading(self, _):
    assert isinstance(self.current_node, nodes.title)
    text = self.current_node.astext()
    if self.translate_section_name:
        text = self.translate_section_name(text)
    name = nodes.fully_normalize_name(text)
    section = self.current_node.parent
    section['names'].append(name)
    self.document.note_implicit_target(section, section)
    self.current_node = section
|
Finish establishing the section.
Wrap up the title node and stick it in the section node. Add the section names
based on all the text nodes added to the title.
|
def bound_spec(self, name):
    if isinstance(name, BaseData):
        name = name.name
    spec = self.data_spec(name)
    try:
        bound = self._inputs[name]
    except KeyError:
        if not spec.derived and spec.default is None:
            raise ArcanaMissingDataException(
                "Acquired (i.e. non-generated) fileset '{}' "
                "was not supplied when the study '{}' was "
                "initiated".format(name, self.name))
        else:
            try:
                bound = self._bound_specs[name]
            except KeyError:
                bound = self._bound_specs[name] = spec.bind(self)
    return bound
|
Returns an input selector or derived spec bound to the study, i.e.
where the repository tree is checked for existing outputs
Parameters
----------
name : Str
A name of a fileset or field
|
def rows(self):
    from ambry.orm import Config as SAConfig
    from sqlalchemy import or_
    rows = []
    configs = self.dataset.session\
        .query(SAConfig)\
        .filter(or_(SAConfig.group == 'config', SAConfig.group == 'process'),
                SAConfig.d_vid == self.dataset.vid)\
        .all()
    for r in configs:
        parts = r.key.split('.', 3)
        if r.group == 'process':
            parts = ['process'] + parts
        cr = ((parts[0] if len(parts) > 0 else None,
               parts[1] if len(parts) > 1 else None,
               parts[2] if len(parts) > 2 else None
               ), r.value)
        rows.append(cr)
    return rows
|
Return configuration in a form that can be used to reconstitute a
Metadata object. Returns all of the rows for a dataset.
This is distinct from get_config_value, which returns the value
for the library.
|
def detect_phantomjs(version='2.1'):
    if settings.phantomjs_path() is not None:
        phantomjs_path = settings.phantomjs_path()
    else:
        if hasattr(shutil, "which"):
            phantomjs_path = shutil.which("phantomjs") or "phantomjs"
        else:
            phantomjs_path = "phantomjs"
    try:
        proc = Popen([phantomjs_path, "--version"], stdout=PIPE, stderr=PIPE)
        proc.wait()
        out = proc.communicate()
        if len(out[1]) > 0:
            raise RuntimeError('Error encountered in PhantomJS detection: %r' % out[1].decode('utf8'))
        required = V(version)
        installed = V(out[0].decode('utf8'))
        if installed < required:
            raise RuntimeError('PhantomJS version too old. Version>=%s required, installed: %s' % (required, installed))
    except OSError:
        raise RuntimeError('PhantomJS is not present in PATH or BOKEH_PHANTOMJS_PATH. Try "conda install phantomjs" or \
            "npm install -g phantomjs-prebuilt"')
    return phantomjs_path
|
Detect if PhantomJS is available in PATH, at a minimum version.
Args:
version (str, optional) :
Required minimum version for PhantomJS (mostly for testing)
Returns:
str, path to PhantomJS
|
def get_user_groups(name, sid=False):
    if name == 'SYSTEM':
        groups = [name]
    else:
        groups = win32net.NetUserGetLocalGroups(None, name)
    if not sid:
        return groups
    ret_groups = set()
    for group in groups:
        ret_groups.add(get_sid_from_name(group))
    return ret_groups
|
Get the groups to which a user belongs
Args:
name (str): The user name to query
sid (bool): True will return a list of SIDs, False will return a list of
group names
Returns:
list: A list of group names or sids
|
def hashitem(item):
    norm = normitem(item)
    byts = s_msgpack.en(norm)
    return hashlib.md5(byts).hexdigest()
|
Generate a unique hash for the JSON compatible primitive data structure.
|
def from_ssl(self,
             ca_certs,
             client_cert,
             client_key,
             hosts=default.ELASTICSEARCH_HOSTS,
             use_ssl=True,
             verify_certs=True, **kwargs):
    self.client = Elasticsearch(hosts=hosts,
                                use_ssl=use_ssl,
                                verify_certs=verify_certs,
                                ca_certs=ca_certs,
                                client_cert=client_cert,
                                client_key=client_key, **kwargs)
    logger.info('Initialize SSL Elasticsearch Client: %s.' % self.client)
|
Initialize an Elasticsearch client over SSL.
:param ca_certs: optional path to CA bundle. See
https://urllib3.readthedocs.io/en/latest/security.html#using-certifi-with-urllib3
:param client_cert: path to the file containing the private key and the
certificate, or cert only if using client_key
:param client_key: path to the file containing the private key if using
separate cert and key files (client_cert will contain only the cert)
:param hosts: hostname of the node
:param use_ssl: use ssl for the connection if `True`
:param verify_certs: whether to verify SSL certificates
:return: void
|
def _return_base_data(self, url, container, container_object=None,
                      container_headers=None, object_headers=None):
    headers = self.job_args['base_headers']
    headers.update({'X-Auth-Token': self.job_args['os_token']})
    _container_uri = url.geturl().rstrip('/')
    if container:
        _container_uri = '%s/%s' % (
            _container_uri, cloud_utils.quoter(container)
        )
    if container_object:
        _container_uri = '%s/%s' % (
            _container_uri, cloud_utils.quoter(container_object)
        )
    if object_headers:
        headers.update(object_headers)
    if container_headers:
        headers.update(container_headers)
    return headers, urlparse.urlparse(_container_uri)
|
Return headers and a parsed url.
:param url:
:param container:
:param container_object:
:param container_headers:
:return: ``tuple``
|
def add(self, cls_or_branch, *args, **kwargs):
    if isinstance(cls_or_branch, Branch):
        self.tasks.append(cls_or_branch)
    else:
        self.__validate_task(cls_or_branch, '__init__', args, kwargs)
        self.tasks.append({'cls_or_branch': cls_or_branch, 'args': args, 'kwargs': kwargs})
    return self
|
Adds a task or branch to the lane.
Parameters
----------
cls_or_branch : Class
*args
Variable length argument list to be passed to `cls_or_branch` during instantiation
**kwargs
Variable length keyword arguments to be passed to `cls_or_branch` during instantiation
Returns
-------
self: Returns `self` to allow method chaining
|
def add_etag(self, overwrite=False, weak=False):
    if overwrite or "etag" not in self.headers:
        self.set_etag(generate_etag(self.get_data()), weak)
|
Add an etag for the current response if there is none yet.
|
def refresh(self):
    if lib.EnvRefresh(self._env, self._rule) != 1:
        raise CLIPSError(self._env)
|
Refresh the Rule.
The Python equivalent of the CLIPS refresh command.
|
def _update_sid_to_last_existing_pid_map(pid):
    last_pid = _find_head_or_latest_connected(pid)
    chain_model = _get_chain_by_pid(last_pid)
    if not chain_model:
        return
    chain_model.head_pid = d1_gmn.app.did.get_or_create_did(last_pid)
    chain_model.save()
|
Set chain head PID to the last existing object in the chain to which ``pid``
belongs. If SID has been set for chain, it resolves to chain head PID.
Intended to be called in MNStorage.delete() and other chain manipulation.
Preconditions:
- ``pid`` must exist and be verified to be a PID.
d1_gmn.app.views.asserts.is_existing_object()
|
def _stage_input_files(self, file_mapping, dry_run=True):
    if self._file_stage is None:
        return
    self._file_stage.copy_to_scratch(file_mapping, dry_run)
|
Stage the input files to the scratch area and adjust the arguments accordingly
|
def _add_record(table, data, buffer_size):
    fields = table.fields
    for invalid_key in set(data).difference([f.name for f in fields]):
        del data[invalid_key]
    table.append(Record.from_dict(fields, data))
    if buffer_size is not None and table.is_attached():
        if (len(table) - 1) - table._last_synced_index > buffer_size:
            table.commit()
|
Prepare and append a Record into its Table; flush to disk if necessary.
|
def predictions(self):
    for prediction in self.api.predictions(vid=self.vid)['prd']:
        pobj = Prediction.fromapi(self.api, prediction)
        pobj._busobj = self
        yield pobj
|
Generator that yields prediction objects from an API response.
|
def main(argv=None):
    arguments = cli_common(__doc__, argv=argv)
    benet = BeNet(arguments['CAMPAIGN_FILE'])
    benet.run()
    if argv is not None:
        return benet
|
ben-nett entry point
|
def bifurcated_extend(self, corpus, max_size):
    temp_fd, temp_path = tempfile.mkstemp(text=True)
    try:
        self._prepare_bifurcated_extend_data(corpus, max_size, temp_path,
                                             temp_fd)
    finally:
        try:
            os.remove(temp_path)
        except OSError as e:
            msg = ('Failed to remove temporary file containing unreduced '
                   'results: {}')
            self._logger.error(msg.format(e))
    self._bifurcated_extend()
|
Replaces the results with those n-grams that contain any of the
original n-grams, and that represent points at which an n-gram
is a constituent of multiple larger n-grams with a lower label
count.
:param corpus: corpus of works to which results belong
:type corpus: `Corpus`
:param max_size: maximum size of n-gram results to include
:type max_size: `int`
|
def cmd_zf(self, ch=None):
    viewer = self.get_viewer(ch)
    if viewer is None:
        self.log("No current viewer/channel.")
        return
    viewer.zoom_fit()
    cur_lvl = viewer.get_zoom()
    self.log("zoom=%f" % (cur_lvl))
|
zf ch=chname
Zoom the image for the given viewer/channel to fit the window.
|
def calc_2d_forces(self, x1, y1, x2, y2, width):
    if x1 > x2:
        a = x1 - x2
    else:
        a = x2 - x1
    a_sq = a * a
    if y1 > y2:
        b = y1 - y2
    else:
        b = y2 - y1
    b_sq = b * b
    from math import sqrt
    c_sq = a_sq + b_sq
    c = sqrt(c_sq)
    if c > width:
        return 0, 0
    else:
        overlap = width - c
        return -overlap / 2, overlap / 2
|
Calculate overlap in 2D space
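A quick worked example (``obj`` is a hypothetical instance; ``self`` is otherwise unused): points (0, 0) and (3, 4) are 5 apart, so with a width of 10 the overlap is 5; points further apart than ``width`` give (0, 0).
>>> obj.calc_2d_forces(0, 0, 3, 4, 10)
(-2.5, 2.5)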
|
def on_channel_closed(self, channel, reply_code, reply_text):
    for future in self.messages.values():
        future.set_exception(AMQPException(reply_code, reply_text))
    self.messages = {}
    if self.closing:
        LOGGER.debug('Channel %s was intentionally closed (%s) %s',
                     channel, reply_code, reply_text)
    else:
        LOGGER.warning('Channel %s was closed: (%s) %s',
                       channel, reply_code, reply_text)
        self.state = self.STATE_BLOCKED
        if self.on_unavailable:
            self.on_unavailable(self)
        self.channel = self._open_channel()
|
Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters.
In this case, we just want to log the error and create a new channel
after setting the state back to connecting.
:param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
|
def _parsemeta_tmy2(columns, line):
    rawmeta = " ".join(line.split()).split(" ")
    meta = rawmeta[:3]
    meta.append(int(rawmeta[3]))
    longitude = (
        float(rawmeta[5]) + float(rawmeta[6])/60) * (2*(rawmeta[4] == 'N') - 1)
    latitude = (
        float(rawmeta[8]) + float(rawmeta[9])/60) * (2*(rawmeta[7] == 'E') - 1)
    meta.append(longitude)
    meta.append(latitude)
    meta.append(float(rawmeta[10]))
    meta_dict = dict(zip(columns.split(','), meta))
    return meta_dict
|
Retrieves metadata from the top line of the tmy2 file.
Parameters
----------
columns : string
String of column headings in the header
line : string
Header string containing DataFrame
Returns
-------
meta : Dict of metadata contained in the header string
|
def sqrt(n):
    if isinstance(n, Rational):
        n = Constructible(n)
    elif not isinstance(n, Constructible):
        raise ValueError('the square root is not implemented for the type %s' % type(n))
    r = n._try_sqrt()
    if r is not None:
        return r
    return Constructible(Constructible.lift_rational_field(0, n.field),
                         Constructible.lift_rational_field(1, n.field),
                         (n, n.field))
|
return the square root of n in an exact representation
|
def add_markdown_cell(self, content, tags=None):
    self.notebook["cells"].append(nb.v4.new_markdown_cell(
        content, **{"metadata": {"tags": tags}}))
|
Class method responsible for adding a markdown cell with content 'content' to the
Notebook object.
----------
Parameters
----------
content : str
Text/HTML code/... to include in the markdown cell (triple quote for multiline text).
tags : list
A list of tags to include in the markdown cell metadata.
|
async def flush(self, request: 'Request'):
    from bernard.middleware import MiddlewareManager
    for stack in self._stacks:
        await stack.convert_media(self.platform)
    func = MiddlewareManager.instance().get('flush', self._flush)
    await func(request, self._stacks)
|
Send all queued messages.
The first step is to convert all media in the stacked layers then the
second step is to send all messages as grouped in time as possible.
|
def consume(self):
    if self.match:
        self.pos = self.match.end()
        if self.match.group()[-1] == '\n':
            self._update_prefix()
        self.match = None
|
Consume the body of source. ``pos`` will move forward.
|
def new_iteration(self, prefix):
    self.flush()
    self.prefix[-1] = prefix
    self.reset_formatter()
|
When inside a loop logger, create a new iteration
|
def quantile(q, variable, weight_variable = None, filter_variable = None):
    def formula(entity, period):
        value = entity(variable, period)
        if weight_variable is not None:
            weight = entity(weight_variable, period)
        else:
            # Default to a uniform weight of 1 when no weight variable is given
            weight = entity.filled_array(1)
        if filter_variable is not None:
            filter_value = entity(filter_variable, period)
            weight = filter_value * weight
        labels = arange(1, q + 1)
        quantile, _ = weightedcalcs_quantiles(
            value,
            labels,
            weight,
            return_quantiles = True,
        )
        if filter_variable is not None:
            quantile = where(weight > 0, quantile, -1)
        return quantile
    return formula
|
Return the quantile of a variable, weighted by a specific weight variable and optionally filtered.
|
def _CheckLegacyPassword(self, password):
    import crypt
    salt = self._value[:2]
    return crypt.crypt(password, salt) == self._value
|
Check password with legacy crypt based method.
|
def common_ancestor(c):
    span1 = _to_span(c[0])
    span2 = _to_span(c[1])
    ancestor1 = np.array(span1.sentence.xpath.split("/"))
    ancestor2 = np.array(span2.sentence.xpath.split("/"))
    min_len = min(ancestor1.size, ancestor2.size)
    return list(ancestor1[: np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
|
Return the path to the root that is shared between a binary-Mention Candidate.
In particular, this is the common path of HTML tags.
:param c: The binary-Mention Candidate to evaluate
:rtype: list of strings
|
def make_pkgng_aware(jname):
    ret = {'changes': {}}
    cdir = _config_dir()
    if not os.path.isdir(cdir):
        os.makedirs(cdir)
        if os.path.isdir(cdir):
            ret['changes'] = 'Created poudriere make file dir {0}'.format(cdir)
        else:
            return 'Could not create or find required directory {0}'.format(
                cdir)
    __salt__['file.write']('{0}-make.conf'.format(os.path.join(cdir, jname)), 'WITH_PKGNG=yes')
    if os.path.isfile(os.path.join(cdir, jname) + '-make.conf'):
        ret['changes'] = 'Created {0}'.format(
            os.path.join(cdir, '{0}-make.conf'.format(jname))
        )
        return ret
    else:
        return 'Looks like file {0} could not be created'.format(
            os.path.join(cdir, jname + '-make.conf')
        )
|
Make jail ``jname`` pkgng aware
CLI Example:
.. code-block:: bash
salt '*' poudriere.make_pkgng_aware <jail name>
|
def read(self):
    data = bytearray()
    while True:
        incoming_bytes = self.comport.inWaiting()
        if incoming_bytes == 0:
            break
        else:
            content = self.comport.read(size=incoming_bytes)
            data.extend(bytearray(content))
    return data
|
Read data from serial port and returns a ``bytearray``.
|
def find_spectrum_match(spec, spec_lib, method='euclidian'):
    spec = spec / np.max(spec)
    if method == 'dot':
        d1 = (spec_lib * lil_matrix(spec).T).sum(axis=1).A ** 2
        d2 = np.sum(spec ** 2) * spec_lib.multiply(spec_lib).sum(axis=1).A
        dist = d1 / d2
    elif method == 'euclidian':
        st_spc = dia_matrix((spec, [0]), shape=(len(spec), len(spec)))
        dist_sp = spec_lib.multiply(spec_lib) - 2 * spec_lib.dot(st_spc)
        dist = dist_sp.sum(axis=1).A + np.sum(spec ** 2)
    return (dist.argmin(), dist.min())
|
Find spectrum in spec_lib most similar to spec.
|
def file_id(self):
    if self.type.lower() == "directory":
        return None
    if self.file_uuid is None:
        raise exceptions.MetsError(
            "No FILEID: File %s does not have file_uuid set" % self.path
        )
    if self.is_aip:
        return os.path.splitext(os.path.basename(self.path))[0]
    return utils.FILE_ID_PREFIX + self.file_uuid
|
Returns the fptr @FILEID if this is not a Directory.
|
def _update_linear_bucket_count(a_float, dist):
    buckets = dist.linearBuckets
    if buckets is None:
        raise ValueError(_BAD_UNSET_BUCKETS % (u'linear buckets'))
    bucket_counts = dist.bucketCounts
    num_finite_buckets = buckets.numFiniteBuckets
    if len(bucket_counts) < num_finite_buckets + 2:
        raise ValueError(_BAD_LOW_BUCKET_COUNT)
    width = buckets.width
    lower = buckets.offset
    upper = lower + (num_finite_buckets * width)
    if a_float < lower:
        index = 0
    elif a_float >= upper:
        index = num_finite_buckets + 1
    else:
        index = 1 + int(((a_float - lower) / width))
    bucket_counts[index] += 1
    _logger.debug(u'upper:%f, lower:%f, width:%f, sample:%f, index:%d',
                  upper, lower, width, a_float, index)
|
Adds `a_float` to `dist`, updating its linear buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not already have linear buckets defined
ValueError: if there are not enough bucket count fields in `dist`
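For example, with offset=0, width=10 and numFiniteBuckets=3, the finite buckets cover [0, 30): a sample of 25 increments index 1 + int((25 - 0) / 10) = 3, a negative sample goes to the underflow bucket at index 0, and a sample of 30 or more goes to the overflow bucket at index 4.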
|
def get_matrix(self):
    if self.parent:
        return self.get_local_matrix() * (self._prev_parent_matrix or self.parent.get_matrix())
    else:
        return self.get_local_matrix()
|
return sprite's current transformation matrix
|
def get_parent_id(self, resource, document):
    parent_type = self._get_parent_type(resource)
    if parent_type and document:
        return document.get(parent_type.get('field'))
    return None
|
Get the Parent Id of the document
:param resource: resource name
:param document: document containing the parent id
|
def _compute(self, data):
    local_ts = self._local_ts(*data)
    dt = local_ts[internal_names.TIME_WEIGHTS_STR]
    dt = dt / np.timedelta64(1, 'D')
    return local_ts, dt
|
Perform the calculation.
|
def get_property(self, name):
    with self.__properties_lock:
        return self.__properties.get(name, os.getenv(name))
|
Retrieves a framework or system property. As framework properties don't
change while the framework is running, this method doesn't need to be protected.
:param name: The property name
|
def _update_xyz(self, change):
    self.x, self.y, self.z = self.position.X(), self.position.Y(), self.position.Z()
|
Keep x,y,z in sync with position
|
def profile_loglike(self, x):
    if self._prof_interp is None:
        return self._profile_loglike(x)[1]
    x = np.array(x, ndmin=1)
    return self._prof_interp(x)
|
Profile log-likelihood.
Returns ``L_prof(x,y=y_min|z')`` : where y_min is the
value of y that minimizes
L for a given x.
This will use the cached '~fermipy.castro.Interpolator' object
if possible, and construct it if needed.
|
def vn_delete(call=None, kwargs=None):
    if call != 'function':
        raise SaltCloudSystemExit(
            'The vn_delete function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    vn_id = kwargs.get('vn_id', None)
    if vn_id:
        if name:
            log.warning(
                'Both the \'vn_id\' and \'name\' arguments were provided. '
                '\'vn_id\' will take precedence.'
            )
    elif name:
        vn_id = get_vn_id(kwargs={'name': name})
    else:
        raise SaltCloudSystemExit(
            'The vn_delete function requires a \'name\' or a \'vn_id\' '
            'to be provided.'
        )
    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    response = server.one.vn.delete(auth, int(vn_id))
    data = {
        'action': 'vn.delete',
        'deleted': response[0],
        'vn_id': response[1],
        'error_code': response[2],
    }
    return data
|
Deletes the given virtual network from OpenNebula. Either a name or a vn_id must
be supplied.
.. versionadded:: 2016.3.0
name
The name of the virtual network to delete. Can be used instead of ``vn_id``.
vn_id
The ID of the virtual network to delete. Can be used instead of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f vn_delete opennebula name=my-virtual-network
salt-cloud --function vn_delete opennebula vn_id=3
|
def get_config(config_file):
    def load(fp):
        try:
            return yaml.safe_load(fp)
        except yaml.YAMLError as e:
            sys.stderr.write(text_type(e))
            sys.exit(1)
    if config_file == '-':
        return load(sys.stdin)
    if not os.path.exists(config_file):
        sys.stderr.write('ERROR: Must either run next to config.yaml or'
                         ' specify a config file.\n' + __doc__)
        sys.exit(2)
    with open(config_file) as fp:
        return load(fp)
|
Get configuration from a file.
|
def setKeepAliveTimeOut(self, iTimeOut):
    print '%s call setKeepAliveTimeOut' % self.port
    print iTimeOut
    try:
        cmd = WPANCTL_CMD + 'setprop NCP:SleepyPollInterval %s' % str(iTimeOut*1000)
        print cmd
        return self.__sendCommand(cmd)[0] != 'Fail'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger('setKeepAliveTimeOut() Error: ' + str(e))
|
Set the keep-alive timeout for the device.
This command has been deprecated; it also sets the SED polling rate.
Args:
iTimeOut: data poll period for sleepy end device
Returns:
True: successful to set the data poll period for SED
False: fail to set the data poll period for SED
|
def disable_if_no_tty(cls):
    if sys.stdout.isatty() or sys.stderr.isatty():
        return False
    cls.disable_all_colors()
    return True
|
Disable all colors only if there is no TTY available.
:return: True if colors are disabled, False if stderr or stdout is a TTY.
:rtype: bool
|
def import_class(clspath):
    modpath, clsname = split_clspath(clspath)
    __import__(modpath)
    module = sys.modules[modpath]
    return getattr(module, clsname)
|
Given a clspath, returns the class.
Note: This is a really simplistic implementation.
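An illustrative use, assuming ``split_clspath`` splits on the final dot:
>>> import_class('collections.OrderedDict')
<class 'collections.OrderedDict'>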
|
def setExpertLevel(self):
    g = get_root(self).globals
    level = g.cpars['expert_level']
    if level == 0:
        if self.val.get() == 'CCD TECs':
            self.val.set('Observe')
            self._changed()
        self.tecs.grid_forget()
    else:
        self.tecs.grid(row=0, column=3, sticky=tk.W)
|
Modifies widget according to expertise level, which in this
case is just a matter of hiding or revealing the button to
set CCD temps
|
def transform(self, maps):
    out = {}
    out["chi_p"] = conversions.chi_p(
        maps[parameters.mass1], maps[parameters.mass2],
        maps[parameters.spin1x], maps[parameters.spin1y],
        maps[parameters.spin2x], maps[parameters.spin2y])
    return self.format_output(maps, out)
|
This function transforms from component masses and cartesian spins
to chi_p.
Parameters
----------
maps : a mapping object
Examples
--------
Convert a dict of numpy.array:
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
|
def load_metrics(event_dir, epoch):
    metrics = {}
    for filename in tf.gfile.ListDirectory(event_dir):
        path = os.path.join(event_dir, filename)
        for event in tf.train.summary_iterator(path):
            if event.step == epoch and event.HasField("summary"):
                value = event.summary.value[0]
                metrics[value.tag] = value.simple_value
    return metrics
|
Loads metrics for this epoch if they have already been written.
This reads the entire event file but it's small with just per-epoch metrics.
Args:
event_dir: TODO(koz4k): Document this.
epoch: TODO(koz4k): Document this.
Returns:
metrics.
|
def create_multiple_replace_func(*args, **kwds):
    adict = dict(*args, **kwds)
    rx = re.compile('|'.join(map(re.escape, adict)))

    def one_xlat(match):
        return adict[match.group(0)]

    def xlat(text):
        return rx.sub(one_xlat, text)

    return xlat
|
You can call this function and pass it a dictionary, or any other
combination of arguments you could pass to built-in dict in order to
construct a dictionary. The function will return a xlat closure that
takes as its only argument text the string on which the substitutions
are desired and returns a copy of text with all the substitutions
performed.
Source: Python Cookbook 2nd ed, Chapter 1.18. Replacing Multiple Patterns
in a Single Pass.
https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html
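A short illustrative use:
>>> xlat = create_multiple_replace_func({'cat': 'dog', 'dog': 'cat'})
>>> xlat('cat chases dog')
'dog chases cat'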
|
def _eval(self, v, in_bounds, der):
    result = np.zeros_like(v, dtype='float')
    x_indices = np.searchsorted(self._x, v, side='right')
    ids = x_indices[in_bounds] - 1
    u = v[in_bounds] - self._x[ids]
    result[in_bounds] = self._poly_eval(u, ids, der)
    return result
|
Eval polynomial inside bounds.
|
def sanitizeStructTime(struct):
    maxValues = (9999, 12, 31, 23, 59, 59)
    minValues = (1, 1, 1, 0, 0, 0)
    newstruct = []
    for value, maxValue, minValue in zip(struct[:6], maxValues, minValues):
        newstruct.append(max(minValue, min(value, maxValue)))
    return tuple(newstruct) + struct[6:]
|
Convert struct_time tuples with possibly invalid values to valid
ones by substituting the closest valid value.
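For example, an out-of-range timestamp is clamped field by field:
>>> sanitizeStructTime((10000, 13, 32, 25, 61, 61, 0, 0, 0))
(9999, 12, 31, 23, 59, 59, 0, 0, 0)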
|
def from_table(self, table=None, fields='*', schema=None, **kwargs):
    self.tables.append(TableFactory(
        table=table,
        fields=fields,
        schema=schema,
        owner=self,
        **kwargs
    ))
    return self
|
Adds a ``Table`` and any optional fields to the list of tables
this query is selecting from.
:type table: str or dict or :class:`Table <querybuilder.tables.Table>`
or :class:`Query <querybuilder.query.Query>` or
:class:`ModelBase <django:django.db.models.base.ModelBase>`
:param table: The table to select fields from. This can be a string of the table
name, a dict of {'alias': table}, a ``Table`` instance, a Query instance, or a
django Model instance
:type fields: str or tuple or list or Field
:param fields: The fields to select from ``table``. Defaults to '*'. This can be
a single field, a tuple of fields, or a list of fields. Each field can be a string
or ``Field`` instance
:type schema: str
:param schema: This is not implemented, but it will be a string of the db schema name
:param kwargs: Any additional parameters to be passed into the constructor of ``TableFactory``
:return: self
:rtype: :class:`Query <querybuilder.query.Query>`
|
async def flexible_api_handler(service, action_type, payload, props, **kwds):
    if action_type == intialize_service_action():
        model = json.loads(payload) if isinstance(payload, str) else payload
        models = service._external_service_data['models']
        connections = service._external_service_data['connections']
        mutations = service._external_service_data['mutations']
        if 'connection' in model:
            if not [conn for conn in connections if conn['name'] == model['name']]:
                connections.append(model)
        elif 'fields' in model and not [mod for mod in models if mod['name'] == model['name']]:
            models.append(model)
        if 'mutations' in model:
            for mutation in model['mutations']:
                if not [mut for mut in mutations if mut['name'] == mutation['name']]:
                    mutations.append(mutation)
        if models:
            service.schema = generate_api_schema(
                models=models,
                connections=connections,
                mutations=mutations,
            )
|
This query handler builds the dynamic picture of availible services.
|
def build_payload(self, payload):
    for segment in self.segments:
        segment.pack(payload, commit=self.autocommit)
|
Build payload of message.
|
def head(self, n=5):
    self._reset_group_selection()
    mask = self._cumcount_array() < n
    return self._selected_obj[mask]
|
Return first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
|
def disable_busy_cursor():
    while QgsApplication.instance().overrideCursor() is not None and \
            QgsApplication.instance().overrideCursor().shape() == \
            QtCore.Qt.WaitCursor:
        QgsApplication.instance().restoreOverrideCursor()
|
Disable the hourglass cursor and listen for layer changes.
|
def validate_field(field, allowed_keys, allowed_types):
    for key, value in field.items():
        if key not in allowed_keys:
            raise exceptions.ParametersFieldError(key, "property")
        if key == defs.TYPE:
            if value not in allowed_types:
                raise exceptions.ParametersFieldError(value, key)
        if key == defs.VALUE:
            if not is_valid_field_name(value):
                raise exceptions.ParametersFieldError(value, "field name")
|
Validate field is allowed and valid.
|
def join(self, iterable):
    return self.__class__(super(ColorStr, self).join(iterable), keep_tags=True)
|
Return a string which is the concatenation of the strings in the iterable.
:param iterable: Join items in this iterable.
|
def _parse_multifile(self, desired_type: Type[T], obj: PersistedObject,
                     parsing_plan_for_children: Dict[str, ParsingPlan], logger: Logger,
                     options: Dict[str, Dict[str, Any]]) -> T:
    pass
|
First parse all children from the parsing plan, then calls _build_object_from_parsed_children
:param desired_type:
:param obj:
:param parsing_plan_for_children:
:param logger:
:param options:
:return:
|
def ensure_unicoded_and_unique(args_list, application):
    unicoded_args = []
    for argument in args_list:
        argument = (six.u(argument)
                    if not isinstance(argument, six.text_type) else argument)
        if argument not in unicoded_args or argument == application:
            unicoded_args.append(argument)
    return unicoded_args
|
Iterate over args_list, make it unicode if needed and ensure that there
are no duplicates.
Returns list of unicoded arguments in the same order.
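On Python 3 (where the arguments are already text) this behaves roughly as:
>>> ensure_unicoded_and_unique(['-v', '--quiet', '-v'], 'myapp')
['-v', '--quiet']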
|
def edge_has_annotation(edge_data: EdgeData, key: str) -> Optional[Any]:
    annotations = edge_data.get(ANNOTATIONS)
    if annotations is None:
        return None
    return annotations.get(key)
|
Check if an edge has the given annotation.
:param edge_data: The data dictionary from a BELGraph's edge
:param key: An annotation key
:return: If the annotation key is present in the current data dictionary
For example, it might be useful to print all edges that are annotated with 'Subgraph':
>>> from pybel.examples import sialic_acid_graph
>>> for u, v, data in sialic_acid_graph.edges(data=True):
>>> if edge_has_annotation(data, 'Species'):
>>> print(u, v, data)
|
def _mean_dict(dict_list):
    return {k: np.array([d[k] for d in dict_list]).mean()
            for k in dict_list[0].keys()}
|
Compute the mean value across a list of dictionaries
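For instance:
>>> _mean_dict([{'a': 1.0}, {'a': 3.0}])
{'a': 2.0}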
|
def skip_redundant(iterable, skipset=None):
    if skipset is None:
        skipset = set()
    for item in iterable:
        if item not in skipset:
            skipset.add(item)
            yield item
|
Redundant items are repeated items or items in the original skipset.
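For instance:
>>> list(skip_redundant([1, 2, 1, 3, 2]))
[1, 2, 3]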
|
def setNumberRange(key, keyType, start, end):
    return And(
        And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)),
        And(lambda n: start <= n <= end, error=SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))),
    )
|
check number range
|
def erase_in_display(self, how=0, *args, **kwargs):
    if how == 0:
        interval = range(self.cursor.y + 1, self.lines)
    elif how == 1:
        interval = range(self.cursor.y)
    elif how == 2 or how == 3:
        interval = range(self.lines)
    self.dirty.update(interval)
    for y in interval:
        line = self.buffer[y]
        for x in line:
            line[x] = self.cursor.attrs
    if how == 0 or how == 1:
        self.erase_in_line(how)
|
Erases display in a specific way.
Character attributes are set to cursor attributes.
:param int how: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of screen, including
cursor position.
* ``1`` -- Erases from beginning of screen to cursor,
including cursor position.
* ``2`` and ``3`` -- Erases complete display. All lines
are erased and changed to single-width. Cursor does not
move.
:param bool private: when ``True`` only characters marked as
eraseable are affected **not implemented**.
.. versionchanged:: 0.8.1
The method accepts any number of positional arguments as some
``clear`` implementations include a ``;`` after the first
parameter causing the stream to assume a ``0`` second parameter.
|
def _on_stackexchange_request(self, future, response):
    content = escape.json_decode(response.body)
    if 'error' in content:
        future.set_exception(Exception('StackExchange error: %s' %
                                       str(content['error'])))
        return
    future.set_result(content)
|
Invoked as a response to the StackExchange API request. Will decode
the response and set the result for the future to return the callback or
raise an exception
|
def get_node(self, node_name):
    for node in self.nodes:
        if node.__name__ == node_name:
            return node
|
Retrieve node with passed name
|
def fetch_token(self):
    grant_type = 'client_credentials'
    channel = yield self._tvm.ticket_full(
        self._client_id, self._client_secret, grant_type, {})
    ticket = yield channel.rx.get()
    raise gen.Return(self._make_token(ticket))
|
Gains token from secure backend service.
:return: Token formatted for Cocaine protocol header.
|
def get(self, *args, **kwargs):
    return self.session.get(*args, **self.get_kwargs(**kwargs))
|
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
|
def get_reservation_ports(session, reservation_id, model_name='Generic Traffic Generator Port'):
    reservation_ports = []
    reservation = session.GetReservationDetails(reservation_id).ReservationDescription
    for resource in reservation.Resources:
        if resource.ResourceModelName == model_name:
            reservation_ports.append(resource)
    return reservation_ports
|
Get all Generic Traffic Generator Port in reservation.
:return: list of all Generic Traffic Generator Port resource objects in reservation
|
def _sid_subdir_path(sid):
    padded_sid = format(sid, '06')
    return os.path.join(
        padded_sid[0:2],
        padded_sid[2:4],
        "{0}.bcolz".format(str(padded_sid))
    )
|
Format subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
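An illustrative call:
>>> _sid_subdir_path(1)
'00/00/000001.bcolz'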
|
def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
    encoder = self._registry.get_encoder(typ)
    try:
        encoder.validate_value(arg)
    except EncodingError:
        return False
    except AttributeError:
        try:
            encoder(arg)
        except EncodingError:
            return False
    return True
|
Determines if the python value ``arg`` is encodable as a value of the
ABI type ``typ``.
:param typ: A string representation for the ABI type against which the
python value ``arg`` will be checked e.g. ``'uint256'``,
``'bytes[]'``, ``'(int,int)'``, etc.
:param arg: The python value whose encodability should be checked.
:returns: ``True`` if ``arg`` is encodable as a value of the ABI type
``typ``. Otherwise, ``False``.
|
def update_health(self, reporter, info):
    with self.changes_squashed:
        alarm = info.alarm
        if alarm.is_ok():
            self._faults.pop(reporter, None)
        else:
            self._faults[reporter] = alarm
        if self._faults:
            faults = sorted(self._faults.values(),
                            key=lambda a: a.severity.value)
            alarm = faults[-1]
            text = faults[-1].message
        else:
            alarm = None
            text = "OK"
        self.health.set_value(text, alarm=alarm)
|
Set the health attribute. Called from part
|
def defocus_blur(x, severity=1):
    c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
    x = np.array(x) / 255.
    kernel = disk(radius=c[0], alias_blur=c[1])
    channels = []
    for d in range(3):
        channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
    channels = np.array(channels).transpose((1, 2, 0))
    x_clip = np.clip(channels, 0, 1) * 255
    return around_and_astype(x_clip)
|
Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
|
def _populateBuffer(self, stream, n):
    try:
        for x in xrange(n):
            output = stream.next()
            self._buffer.write(output)
    except StopIteration, e:
        self._deferred.callback(None)
    except Exception, e:
        self._deferred.errback(e)
    else:
        self.delayedCall = reactor.callLater(CALL_DELAY, self._populateBuffer, stream, n)
|
Iterator that returns N steps of
the genshi stream.
Found that performance really sucks for
n = 1 (0.5 requests/second for the root resources
versus 80 requests/second for a blocking algorithm).
Hopefully increasing the number of steps per timeslice will
significantly improve performance.
|
def _extract_alphabet(self, grammar):
    alphabet = set([])
    for terminal in grammar.Terminals:
        alphabet |= set([x for x in terminal])
    self.alphabet = list(alphabet)
|
Extract an alphabet from the given grammar.
|
def Compile(self, filter_implementation):
    arguments = [self.attribute]
    for argument in self.args:
        arguments.append(argument.Compile(filter_implementation))
    expander = filter_implementation.FILTERS['ValueExpander']
    context_cls = filter_implementation.FILTERS['Context']
    return context_cls(arguments=arguments,
                       value_expander=expander)
|
Compile the expression.
|
def get_users_by_ids(self, user_ids):
    urls = [urljoin(self.user_url, F"{i}.json") for i in user_ids]
    result = self._run_async(urls=urls)
    return [User(r) for r in result if r]
|
Given a list of user ids, return all the User objects
|
def set(self, prop, value):
    prop_parts = prop.split(".")
    if self.copy_dict:
        new_dict = copy.deepcopy(self.obj)
    else:
        new_dict = self.obj
    pointer = None
    parts_length = len(prop_parts) - 1
    for i, part in enumerate(prop_parts):
        if pointer is None and i == parts_length:
            new_dict[part] = value
        elif pointer is None:
            pointer = new_dict.get(part)
        elif i == parts_length:
            pointer[part] = value
        else:
            pointer = pointer.get(part)
    return new_dict
|
sets the dot notated property to the passed in value
args:
prop: a string of the property to retrieve
"a.b.c" ~ dictionary['a']['b']['c']
value: the value to set the prop object
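An illustrative use on a hypothetical wrapper whose ``obj`` is a plain dict and whose ``copy_dict`` is False:
>>> wrapper.obj = {'a': {'b': 1}}
>>> wrapper.set('a.b', 2)
{'a': {'b': 2}}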
|
def censor_entity_types(self, entity_types):
    assert type(entity_types) == set
    self._entity_types_to_censor = entity_types
    self._feats_from_spacy_doc = FeatsFromSpacyDoc(
        use_lemmas=self._use_lemmas,
        entity_types_to_censor=self._entity_types_to_censor
    )
    return self
|
Entity types to exclude from feature construction. Terms matching
specified entities, instead of being labeled by their lower case orthographic
form or lemma, will be labeled by their entity type.
Parameters
----------
entity_types : set of entity types outputted by spaCy
'TIME', 'WORK_OF_ART', 'PERSON', 'MONEY', 'ORG', 'ORDINAL', 'DATE',
'CARDINAL', 'LAW', 'QUANTITY', 'GPE', 'PERCENT'
Returns
---------
self
|
def from_dict(self, document):
    identifier = str(document['_id'])
    active = document['active']
    timestamp = datetime.datetime.strptime(document['timestamp'], '%Y-%m-%dT%H:%M:%S.%f')
    properties = document['properties']
    directory = self.get_directory(identifier)
    return ImageHandle(identifier, properties, directory, timestamp=timestamp, is_active=active)
|
Create image object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ImageHandle
Handle for image object
|
def get_files(self, file_paths):
    results = []

    def get_file_thunk(path, interface):
        result = error = None
        try:
            result = interface.get_file(path)
        except Exception as err:
            error = err
            print(err)
        content, encoding = result
        content = compression.decompress(content, encoding)
        results.append({
            "filename": path,
            "content": content,
            "error": error,
        })

    for path in file_paths:
        if len(self._threads):
            self.put(partial(get_file_thunk, path))
        else:
            get_file_thunk(path, self._interface)
    desc = 'Downloading' if self.progress else None
    self.wait(desc)
    return results
|
returns a list of files faster by using threads
|
def make_tree(self):
    self.tree['is_ready'] = False
    leaf_count = len(self.tree['leaves'])
    if leaf_count > 0:
        self._unshift(self.tree['levels'], self.tree['leaves'])
        while len(self.tree['levels'][0]) > 1:
            self._unshift(self.tree['levels'], self._calculate_next_level())
    self.tree['is_ready'] = True
|
Generates the merkle tree.
|
def get_all_hits(self):
    page_size = 100
    search_rs = self.search_hits(page_size=page_size)
    total_records = int(search_rs.TotalNumResults)
    get_page_hits = lambda(page): self.search_hits(page_size=page_size, page_number=page)
    page_nums = self._get_pages(page_size, total_records)
    hit_sets = itertools.imap(get_page_hits, page_nums)
    return itertools.chain.from_iterable(hit_sets)
|
Return all of a Requester's HITs
Despite what search_hits says, it does not return all hits, but
instead returns a page of hits. This method will pull the hits
from the server 100 at a time, but will yield the results
iteratively, so subsequent requests are made on demand.
|
def homepage():
    if current_user.is_authenticated():
        if not login_fresh():
            logging.debug('User needs a fresh token')
            abort(login.needs_refresh())
        auth.claim_invitations(current_user)
    build_list = operations.UserOps(current_user.get_id()).get_builds()
    return render_template(
        'home.html',
        build_list=build_list,
        show_video_and_promo_text=app.config['SHOW_VIDEO_AND_PROMO_TEXT'])
|
Renders the homepage.
|
def get_switch_macs(self, switch_ip=None, node=None, vlan=None, mac=None, port=None, verbose=0):
    if (switch_ip == None):
        if (node == None):
            raise Exception('get_switch_macs() requires switch_ip or node parameter')
            return None
        switch_ip = node.get_ipaddr()
    mac_obj = natlas_mac(self.config)
    if (vlan == None):
        macs = mac_obj.get_macs(switch_ip, verbose)
    else:
        macs = mac_obj.get_macs_for_vlan(switch_ip, vlan, verbose)
    if ((mac == None) & (port == None)):
        return macs if macs else []
    ret = []
    for m in macs:
        if (mac != None):
            if (re.match(mac, m.mac) == None):
                continue
        if (port != None):
            if (re.match(port, m.port) == None):
                continue
        ret.append(m)
    return ret
|
Get the CAM table from a switch.
Args:
switch_ip IP address of the device
node natlas_node from new_node()
vlan Filter results by VLAN
MAC Filter results by MAC address (regex)
port Filter results by port (regex)
verbose Display progress to stdout
switch_ip or node is required
Return:
Array of natlas_mac objects
|
def get_cdn_auth_token(self, app_id, hostname):
    return self.send_job_and_wait(MsgProto(EMsg.ClientGetCDNAuthToken),
                                  {
                                      'app_id': app_id,
                                      'host_name': hostname,
                                  },
                                  timeout=15
                                  )
|
Get CDN authentication token
:param app_id: app id
:type app_id: :class:`int`
:param hostname: cdn hostname
:type hostname: :class:`str`
:return: `CMsgClientGetCDNAuthTokenResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver_2.proto#L585-L589>`_
:rtype: proto message
|
def normalize_signature(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if kwargs:
            args = args, kwargs
        if len(args) == 1:
            args = args[0]
        return func(args)
    return wrapper
|
Decorator. Combine args and kwargs. Unpack single item tuples.
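A rough illustration of the behavior (the wrapped function always receives a single positional argument):
>>> @normalize_signature
... def show(payload):
...     return payload
>>> show(1, 2)
(1, 2)
>>> show(1)
1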
|