| code (string, 51-2.38k chars) | docstring (string, 4-15.2k chars) |
|---|---|
def get_branding(self, branding_id):
connection = Connection(self.token)
connection.set_url(self.production, self.BRANDINGS_ID_URL % branding_id)
return connection.get_request()
|
Get a concrete branding
@branding_id: Id of the branding to fetch
@return Branding
|
def _event_funcs(self, event: str) -> Iterable[Callable]:
for func in self._events[event]:
yield func
|
Returns an Iterable of the functions subscribed to an event.
:param event: Name of the event.
:type event: str
:return: An iterable of the subscribed callables.
:rtype: Iterable
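A minimal usage sketch, assuming a host object whose _events dict maps event names to lists of callables (the attribute the generator reads); the Emitter class and the 'ready' event are made up for illustration:
from typing import Callable, Iterable

class Emitter:
    def __init__(self):
        # event name -> list of subscribed callables
        self._events = {'ready': [print]}

    def _event_funcs(self, event: str) -> Iterable[Callable]:
        for func in self._events[event]:
            yield func

emitter = Emitter()
for func in emitter._event_funcs('ready'):
    func('ready fired')  # calls print('ready fired')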
|
def _api_itemvalue(self, plugin, item, value=None, history=False, nb=0):
response.content_type = 'application/json; charset=utf-8'
if plugin not in self.plugins_list:
abort(400, "Unknown plugin %s (available plugins: %s)" % (plugin, self.plugins_list))
self.__update__()
if value is None:
if history:
ret = self.stats.get_plugin(plugin).get_stats_history(item, nb=int(nb))
else:
ret = self.stats.get_plugin(plugin).get_stats_item(item)
if ret is None:
abort(404, "Cannot get item %s%s in plugin %s" % (item, 'history ' if history else '', plugin))
else:
if history:
ret = None
else:
ret = self.stats.get_plugin(plugin).get_stats_value(item, value)
if ret is None:
abort(404, "Cannot get item %s(%s=%s) in plugin %s" % ('history ' if history else '', item, value, plugin))
return ret
|
Parent method shared by _api_item and _api_value.
|
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value) / float(max_value)
|
Percentage of pixels in each object that do not overlap with the other object
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
|
def focus_first_child(self):
w, focuspos = self.get_focus()
child = self._tree.first_child_position(focuspos)
if child is not None:
self.set_focus(child)
|
Move focus to the first child of the currently focused node.
|
def languages(self, key, value):
languages = self.get('languages', [])
values = force_list(value.get('a'))
for value in values:
for language in RE_LANGUAGE.split(value):
try:
name = language.strip().capitalize()
languages.append(pycountry.languages.get(name=name).alpha_2)
except KeyError:
pass
return languages
|
Populate the ``languages`` key.
|
def rightStatus(self, sheet):
'Compose right side of status bar.'
if sheet.currentThreads:
gerund = (' '+sheet.progresses[0].gerund) if sheet.progresses else ''
status = '%9d %2d%%%s' % (len(sheet), sheet.progressPct, gerund)
else:
status = '%9d %s' % (len(sheet), sheet.rowtype)
return status, 'color_status'
|
Compose right side of status bar.
|
def get_consensus_at(self, block_id):
query = 'SELECT consensus_hash FROM snapshots WHERE block_id = ?;'
args = (block_id,)
con = self.db_open(self.impl, self.working_dir)
rows = self.db_query_execute(con, query, args, verbose=False)
res = None
for r in rows:
res = r['consensus_hash']
con.close()
return res
|
Get the consensus hash at a given block.
Return the consensus hash if we have one for this block.
Return None if we don't
|
def search(self, params, standardize=False):
resp = self._request(ENDPOINTS['SEARCH'], params)
if not standardize:
return resp
for res in resp['result_data']:
res = self.standardize(res)
return resp
|
Get a list of person objects for the given search params.
:param params: Dictionary specifying the query parameters
:param standardize: Whether to standardize names and other features,
currently disabled for backwards compatibility. Currently
standardizes names, lowercases emails, and removes faculty label
from affiliation.
>>> people = d.search({'first_name': 'tobias', 'last_name': 'funke'})
|
def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,
**kwargs):
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names) = {1}'
.format(len(estimator_list), len(estimator_names)))
bs_values_list = pu.parallel_apply(
nestcheck.error_analysis.run_bootstrap_values, run_list,
func_args=(estimator_list,), func_kwargs={'n_simulate': n_simulate},
tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
df[name] = [arr[i, :] for arr in bs_values_list]
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
assert vals_shape == (n_simulate,), (
'Should be n_simulate=' + str(n_simulate) + ' values in ' +
'each cell. The cell contains array with shape ' +
str(vals_shape))
return df
|
Computes a data frame of bootstrap resampled values.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int
Number of bootstrap replications to use on each run.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
bs_values_df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d array of bootstrap resampled values for the run
and estimator.
|
def createThreeObjects():
objectA = zip(range(10), range(10))
objectB = [(0, 0), (2, 2), (1, 1), (1, 4), (4, 2), (4, 1)]
objectC = [(0, 0), (1, 1), (3, 1), (0, 1)]
return [objectA, objectB, objectC]
|
Helper function that creates a set of three objects used for basic
experiments.
:return: (list(list(tuple))) List of lists of feature / location pairs.
|
def _update_quoting_state(self, ch):
is_escaped = self.escaped
self.escaped = (not self.escaped and
ch == '\\' and
self.quotes != self.SQUOTE)
if self.escaped:
return ''
if is_escaped:
if self.quotes == self.DQUOTE:
if ch == '"':
return ch
return "{0}{1}".format('\\', ch)
return ch
if self.quotes is None:
if ch in (self.SQUOTE, self.DQUOTE):
self.quotes = ch
return ''
elif self.quotes == ch:
self.quotes = None
return ''
return ch
|
Update self.quotes and self.escaped
:param ch: str, current character
:return: ch if it was not used to update quoting state, else ''
|
def handle(self, data, **kwargs):
try:
if self.many:
return self.mapper.many(raw=self.raw, **self.mapper_kwargs).marshal(
data, role=self.role
)
else:
return self.mapper(
data=data,
obj=self.obj,
partial=self.partial,
**self.mapper_kwargs
).marshal(role=self.role)
except MappingInvalid as e:
self.handle_error(e)
|
Run marshalling for the specified mapper_class.
Supports both .marshal and .many().marshal Kim interfaces. Handles errors raised
during marshalling and automatically returns an HTTP error response.
:param data: Data to be marshaled.
:returns: Marshaled object according to mapper configuration
:raises: :class:`werkzeug.exceptions.UnprocessableEntity`
|
def path_from_row_pks(row, pks, use_rowid, quote=True):
if use_rowid:
bits = [row['rowid']]
else:
bits = [
row[pk]["value"] if isinstance(row[pk], dict) else row[pk]
for pk in pks
]
if quote:
bits = [urllib.parse.quote_plus(str(bit)) for bit in bits]
else:
bits = [str(bit) for bit in bits]
return ','.join(bits)
|
Generate an optionally URL-quoted unique identifier
for a row from its primary keys.
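A brief usage sketch, assuming the function above is in scope; the row values are invented:
import urllib.parse  # needed by path_from_row_pks

row = {'id': 42, 'name': 'café table'}
path_from_row_pks(row, pks=['id', 'name'], use_rowid=False)
# -> '42,caf%C3%A9+table'  (each primary-key value is quote_plus-encoded)
path_from_row_pks({'rowid': 7}, pks=[], use_rowid=True, quote=False)
# -> '7'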
|
def get_int(self):
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
return int(token.value)
|
Read the next token and interpret it as an integer.
@raises dns.exception.SyntaxError:
@rtype: int
|
def watch(self, keys, on_watch, filters=None, start_revision=None, return_previous=None):
d = self._start_watching(keys, on_watch, filters, start_revision, return_previous)
def on_err(*args):
if args[0].type not in [CancelledError, ResponseFailed]:
self.log.warn('etcd watch terminated with "{error}"', error=args[0].type)
return args[0]
d.addErrback(on_err)
return d
|
Watch one or more keys or key sets and invoke a callback.
Watches for events that are happening or have happened. The entire event history
can be watched starting from the last compaction revision.
:param keys: Watch these keys / key sets.
:type keys: list of bytes or list of instance of :class:`txaioetcd.KeySet`
:param on_watch: The callback to invoke upon receiving
a watch event.
:type on_watch: callable
:param filters: Any filters to apply.
:param start_revision: Optional revision to watch from (inclusive).
If not given, watching starts from "now".
:type start_revision: int
:param return_previous: Flag to request returning previous values.
:returns: A deferred that just fires when watching has started successfully,
or which fires with an error in case the watching could not be started.
:rtype: twisted.internet.Deferred
|
def _checkReturnTo(self, message, return_to):
try:
self._verifyReturnToArgs(message.toPostArgs())
except ProtocolError as why:
logging.exception("Verifying return_to arguments: %s" % (why, ))
return False
msg_return_to = message.getArg(OPENID_NS, 'return_to')
app_parts = urlparse(urinorm.urinorm(return_to))
msg_parts = urlparse(urinorm.urinorm(msg_return_to))
for part in range(0, 3):
if app_parts[part] != msg_parts[part]:
return False
return True
|
Check an OpenID message and its openid.return_to value
against a return_to URL from an application. Return True on
success, False on failure.
|
def structure_repr(self):
ret = '{%s}' % ', '.join([str(x) for x in self.elements])
return self._wrap_packed(ret)
|
Return the LLVM IR for the structure representation
|
def in_group(self, group, dn=False):
if dn:
return group in self.groups()
return group.check_member(self)
|
Get whether or not the bound CSH LDAP member object is part of a
group.
Arguments:
group -- the CSHGroup object (or distinguished name) of the group to
check membership for
|
def set_pending_symbol(self, pending_symbol=None):
if pending_symbol is None:
pending_symbol = CodePointArray()
self.value = bytearray()
self.pending_symbol = pending_symbol
self.line_comment = False
return self
|
Sets the context's ``pending_symbol`` with the given unicode sequence and resets the context's ``value``.
If the input is None, an empty :class:`CodePointArray` is used.
|
def write_params(path, *args, **dicts):
path = Path(path)
if not path.parent.is_dir():
path.parent.mkdir(parents=True)
if len(args) == 1:
d = args[0]
with path.open('w') as f:
for key in d:
f.write(key + ' = ' + str(d[key]) + '\n')
else:
with path.open('w') as f:
for k, d in dicts.items():
f.write('[' + k + ']\n')
for key, val in d.items():
f.write(key + ' = ' + str(val) + '\n')
|
Write parameters to file, so that it's readable by read_params.
Uses INI file format.
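A short usage sketch of both call forms, assuming the function above is importable (it relies on pathlib.Path internally); the file names and keys are made up:
# One positional dict: flat "key = value" lines.
write_params('out/flat.ini', {'alpha': 0.5, 'steps': 100})
# out/flat.ini contains:
#   alpha = 0.5
#   steps = 100

# Keyword arguments: each keyword becomes an INI section.
write_params('out/grouped.ini', model={'alpha': 0.5}, data={'path': 'train.csv'})
# out/grouped.ini contains:
#   [model]
#   alpha = 0.5
#   [data]
#   path = train.csv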
|
def check_for_stalled_tasks():
from api.models.tasks import Task
for task in Task.objects.filter(status_is_running=True):
if not task.is_responsive():
task.system_error()
if task.is_timed_out():
task.timeout_error()
|
Check for tasks that are no longer sending a heartbeat
|
def all(self, paths, access=None):
self.failures = [path for path in paths if not
isvalid(path, access, filetype='all')]
return not self.failures
|
Verify list of paths
|
def expanduser(path):
if hdfs_fs.default_is_local():
return os.path.expanduser(path)
m = re.match(r'^~([^/]*)', path)
if m is None:
return path
user = m.groups()[0] or common.DEFAULT_USER
return '/user/%s%s' % (user, path[m.end(1):])
|
Replace initial ``~`` or ``~user`` with the user's home directory.
**NOTE:** if the default file system is HDFS, the ``~user`` form is
expanded regardless of the user's existence.
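Expected behaviour when the default file system is HDFS, sketched from the regex and format string above; common.DEFAULT_USER is whatever the library has configured:
# expanduser('~/data')       -> '/user/<DEFAULT_USER>/data'
# expanduser('~alice/data')  -> '/user/alice/data'
# expanduser('logs/run1')    -> 'logs/run1'   (no leading ~, returned unchanged)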
|
def to_dict(obj):
json_obj = {'type' : repr(obj)}
if obj.is_failed:
json_obj['errors'] = obj.errors
elif obj.is_success:
json_obj['modelOutput'] = obj.model_output
return json_obj
|
Generate a JSON serialization for the run state object.
Returns
-------
Json-like object
Json serialization of model run state object
|
def _has_argument(func):
if hasattr(inspect, 'signature'):
sig = inspect.signature(func)
return bool(sig.parameters)
else:
return bool(inspect.getargspec(func).args)
|
Test whether a function expects an argument.
:param func:
The function to be tested for existence of an argument.
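A quick illustration, assuming _has_argument above is importable:
def no_args():
    pass

def one_arg(x):
    return x

_has_argument(no_args)             # False: empty signature
_has_argument(one_arg)             # True: one parameter
_has_argument(lambda a, b: a + b)  # True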
|
def authInsert(user, role, group, site):
if not role: return True
for k, v in user['roles'].iteritems():
for g in v['group']:
if k in role.get(g, '').split(':'):
return True
return False
|
Authorization function for general insert
|
def register_metric_descriptor(self, oc_md):
metric_type = self.get_metric_type(oc_md)
with self._md_lock:
if metric_type in self._md_cache:
return self._md_cache[metric_type]
descriptor = self.get_metric_descriptor(oc_md)
project_name = self.client.project_path(self.options.project_id)
sd_md = self.client.create_metric_descriptor(project_name, descriptor)
with self._md_lock:
self._md_cache[metric_type] = sd_md
return sd_md
|
Register a metric descriptor with stackdriver.
|
def group(self):
"Group inherited from items"
if self._group:
return self._group
group = get_ndmapping_label(self, 'group') if len(self) else None
if group is None:
return type(self).__name__
return group
|
Group inherited from items
|
def triads(key):
if _triads_cache.has_key(key):
return _triads_cache[key]
res = map(lambda x: triad(x, key), keys.get_notes(key))
_triads_cache[key] = res
return res
|
Return all the triads in key.
Implemented using a cache.
|
def _create_storage_profile(self):
if self.image_publisher:
storage_profile = {
'image_reference': {
'publisher': self.image_publisher,
'offer': self.image_offer,
'sku': self.image_sku,
'version': self.image_version
},
}
else:
for image in self.compute.images.list():
if image.name == self.image_id:
image_id = image.id
break
else:
raise AzureCloudException(
'Image with name {0} not found.'.format(self.image_id)
)
storage_profile = {
'image_reference': {
'id': image_id
}
}
return storage_profile
|
Create the storage profile for the instance.
Image reference can be a custom image name or a published urn.
|
def install_extension(conn, extension: str):
query = 'CREATE EXTENSION IF NOT EXISTS "%s";'
with conn.cursor() as cursor:
cursor.execute(query, (AsIs(extension),))
installed = check_extension(conn, extension)
if not installed:
raise psycopg2.ProgrammingError(
'Postgres extension failed installation.', extension
)
|
Install Postgres extension.
|
def __get_jp(self, extractor_processor, sub_output=None):
if sub_output is None and extractor_processor.output_field is None:
raise ValueError(
"ExtractorProcessors input paths cannot be unioned across fields. Please specify either a sub_output or use a single scalar output_field")
if extractor_processor.get_output_jsonpath_with_name(sub_output) is not None:
return extractor_processor.get_output_jsonpath_with_name(sub_output)
else:
return extractor_processor.get_output_jsonpath(sub_output)
|
Tries to get name from ExtractorProcessor to filter on first.
Otherwise falls back to filtering based on its metadata
|
def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):
ds = self._to_temp_dataset().roll(
shifts=shifts, roll_coords=roll_coords, **shifts_kwargs)
return self._from_temp_dataset(ds)
|
Roll this array by an offset along one or more dimensions.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
roll_coords : bool
Indicates whether to roll the coordinates by the offset
The current default of roll_coords (None, equivalent to True) is
deprecated and will change to False in a future version.
Explicitly pass roll_coords to silence the warning.
**shifts : keyword arguments of the form {dim: offset}
Integer offset to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
Returns
-------
rolled : DataArray
DataArray with the same attributes but rolled data and coordinates.
See also
--------
shift
Examples
--------
>>> arr = xr.DataArray([5, 6, 7], dims='x')
>>> arr.roll(x=1)
<xarray.DataArray (x: 3)>
array([7, 5, 6])
Coordinates:
* x (x) int64 2 0 1
|
def append_surface(self, name, surface, alpha=1.):
self.insert_surface(position=self.df_surfaces.index.shape[0],
name=name, surface=surface, alpha=alpha)
|
Append Cairo surface as new layer on top of existing layers.
Args
----
name (str) : Name of layer.
surface (cairo.ImageSurface) : Surface to render.
alpha (float) : Alpha/transparency level in the range `[0, 1]`.
|
def resize(self, targ_sz, new_path='tmp', resume=True, fn=None):
new_ds = []
dls = [self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl]
if self.test_dl: dls += [self.test_dl, self.test_aug_dl]
else: dls += [None,None]
t = tqdm_notebook(dls)
for dl in t: new_ds.append(self.resized(dl, targ_sz, new_path, resume, fn))
t.close()
return self.__class__(new_ds[0].path, new_ds, self.bs, self.num_workers, self.classes)
|
Resizes all the images in the train, valid, test folders to a given size.
Arguments:
targ_sz (int): the target size
new_path (str): the path to save the resized images (default tmp)
resume (bool): if True, check for images in the DataSet that haven't been resized yet (useful if a previous resize
operation was aborted)
fn (function): optional custom resizing function
|
def deploy(self, machine):
log.debug("machine id: %s." % machine)
path = self.path + "/machines"
value, metadata = yield self.client.get(path)
machines = json.loads(value)
machines.append(machine)
yield self.client.set(path, json.dumps(machines))
|
Deploy service.
|
def query_all(kind='1', by_count=False, by_order=True):
if by_count:
recs = TabTag.select().where(TabTag.kind == kind).order_by(TabTag.count.desc())
elif by_order:
recs = TabTag.select().where(TabTag.kind == kind).order_by(TabTag.order)
else:
recs = TabTag.select().where(TabTag.kind == kind).order_by(TabTag.uid)
return recs
|
Query all the categories, ordered by count or by the defined order.
|
def sample(self, fraction, seed=None, exact=False):
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (len(self) == 0):
return SArray()
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
with cython_context():
return SArray(_proxy=self.__proxy__.sample(fraction, seed, exact))
|
Create an SArray which contains a subsample of the current SArray.
Parameters
----------
fraction : float
Fraction of the rows to fetch. Must be between 0 and 1.
If exact is False (default), the number of rows returned is
approximately the fraction times the number of rows.
seed : int, optional
The random seed for the random number generator.
exact: bool, optional
Defaults to False. If exact=True, an exact fraction is returned,
but at a performance penalty.
Returns
-------
out : SArray
The new SArray which contains the subsampled rows.
Examples
--------
>>> sa = turicreate.SArray(range(10))
>>> sa.sample(.3)
dtype: int
Rows: 3
[2, 6, 9]
|
def create(self, unique_name, domain_suffix=values.unset):
data = values.of({'UniqueName': unique_name, 'DomainSuffix': domain_suffix, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return EnvironmentInstance(self._version, payload, service_sid=self._solution['service_sid'], )
|
Create a new EnvironmentInstance
:param unicode unique_name: The unique_name
:param unicode domain_suffix: The domain_suffix
:returns: Newly created EnvironmentInstance
:rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
|
def findCampaigns(ra, dec):
logger.disabled = True
campaigns_visible = []
for c in fields.getFieldNumbers():
fovobj = fields.getKeplerFov(c)
if onSiliconCheck(ra, dec, fovobj):
campaigns_visible.append(c)
logger.disabled = False
return campaigns_visible
|
Returns a list of the campaigns that cover a given position.
Parameters
----------
ra, dec : float, float
Position in decimal degrees (J2000).
Returns
-------
campaigns : list of int
A list of the campaigns that cover the given position.
|
def get_aa_code(aa_letter):
aa_code = None
if aa_letter != 'X':
for key, val in standard_amino_acids.items():
if key == aa_letter:
aa_code = val
return aa_code
|
Get three-letter aa code if possible. If not, return None.
If three-letter code is None, will have to find this later from the filesystem.
Parameters
----------
aa_letter : str
One-letter amino acid code.
Returns
-------
aa_code : str, or None
Three-letter aa code.
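A usage sketch, assuming standard_amino_acids maps one-letter codes to three-letter codes (e.g. 'A' to 'ALA'), which is what the lookup above implies:
get_aa_code('A')  # 'ALA' - found in standard_amino_acids
get_aa_code('X')  # None  - 'X' is skipped and resolved later from the filesystem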
|
def make_bubble_surface(dims=DEFAULT_DIMS, repeat=3):
gradients = make_gradients(dims)
return (
np.sin((gradients[0] - 0.5) * repeat * np.pi) *
np.sin((gradients[1] - 0.5) * repeat * np.pi))
|
Makes a surface from the product of sine functions on each axis.
Args:
dims (pair): the dimensions of the surface to create
repeat (int): the frequency of the waves is set to ensure this many
repetitions of the function
Returns:
surface: A surface.
|
def relpath(self):
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
|
Return this path as a relative path,
based from the current working directory.
|
def process_template(self, sql, **kwargs):
template = self.env.from_string(sql)
kwargs.update(self.context)
return template.render(kwargs)
|
Processes a SQL template.
>>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
>>> process_template(sql)
"SELECT '2017-01-01T00:00:00'"
|
def _update_feature_log_prob(self, alpha):
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
|
Apply smoothing to raw counts and recompute log probabilities
|
def _options(self):
if self._options_cache is None:
target_url = self.client.get_url(self._URL_KEY, 'OPTIONS', 'options')
r = self.client.request('OPTIONS', target_url)
self._options_cache = r.json()
return self._options_cache
|
Returns a raw options object
:rtype: dict
|
def _output(self, s):
if s.lower().startswith(b'host: '):
self._buffer.insert(1, s)
else:
self._buffer.append(s)
|
Host header should always be first
|
def format_explanation(explanation, original_msg=None):
if not conf.is_message_introspection_enabled() and original_msg:
return original_msg
explanation = ecu(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u('\n').join(result)
|
This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended to
cover nested explanations; see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
|
def packageGraph(self, packagelevel=None):
packages = {}
for module in self.listModules():
package_name = self.packageOf(module.modname, packagelevel)
if package_name not in packages:
dirname = os.path.dirname(module.filename)
packages[package_name] = Module(package_name, dirname)
package = packages[package_name]
for name in module.imports:
package_name = self.packageOf(name, packagelevel)
if package_name != package.modname:
package.imports.add(package_name)
graph = ModuleGraph()
graph.modules = packages
return graph
|
Convert a module graph to a package graph.
|
def register_path(self, path, modified_time=None):
if not foundations.common.path_exists(path):
raise foundations.exceptions.PathExistsError("{0} | '{1}' path doesn't exist!".format(
self.__class__.__name__, path))
if path in self:
raise umbra.exceptions.PathRegistrationError("{0} | '{1}' path is already registered!".format(
self.__class__.__name__, path))
self.__paths[path] = (self.get_path_modified_time(
path) if modified_time is None else modified_time, os.path.isfile(path))
return True
|
Registers given path.
:param path: Path name.
:type path: unicode
:param modified_time: Custom modified time.
:type modified_time: int or float
:return: Method success.
:rtype: bool
|
def change_and_save(self, update_only_changed_fields=False, **changed_fields):
bulk_change_and_save(self, update_only_changed_fields=update_only_changed_fields, **changed_fields)
return self.filter()
|
Changes a given `changed_fields` on each object in the queryset, saves objects
and returns the changed objects in the queryset.
|
def build_pages(self):
for root, _, files in os.walk(self.pages_dir):
base_dir = root.replace(self.pages_dir, "").lstrip("/")
if not base_dir.startswith("_"):
for f in files:
src_file = os.path.join(base_dir, f)
self._build_page(src_file)
|
Iterate over the pages_dir and build the pages
|
def _pass_variable(self):
pass_var = []
for var in os.environ.keys():
expVAR = var.split("_")
if expVAR[0] == self.prgnam.upper() and expVAR[1] != "PATH":
pass_var.append("{0}={1}".format(expVAR[1], os.environ[var]))
return pass_var
|
Return environment variables.
|
def getAllSecrets(version="", region=None, table="credential-store",
context=None, credential=None, session=None, **kwargs):
if session is None:
session = get_session(**kwargs)
dynamodb = session.resource('dynamodb', region_name=region)
kms = session.client('kms', region_name=region)
secrets = listSecrets(region, table, **kwargs)
if credential and WILDCARD_CHAR in credential:
names = set(expand_wildcard(credential,
[x["name"]
for x in secrets]))
else:
names = set(x["name"] for x in secrets)
pool = ThreadPool(min(len(names), THREAD_POOL_MAX_SIZE))
results = pool.map(
lambda credential: getSecret(credential, version, region, table, context, dynamodb, kms, **kwargs),
names)
pool.close()
pool.join()
return dict(zip(names, results))
|
fetch and decrypt all secrets
|
def is_gentarget(self, target):
if self.gentarget_type:
return isinstance(target, self.gentarget_type)
else:
raise NotImplementedError
|
Predicate which determines whether the target in question is relevant to this codegen task.
E.g., the JaxbGen task considers JaxbLibrary targets to be relevant, and nothing else.
:API: public
:param Target target: The target to check.
:return: True if this class can generate code for the given target, False otherwise.
|
def _parse_throttle(self, tablename, throttle):
amount = []
desc = self.describe(tablename)
throughputs = [desc.read_throughput, desc.write_throughput]
for value, throughput in zip(throttle[1:], throughputs):
if value == "*":
amount.append(0)
elif value[-1] == "%":
amount.append(throughput * float(value[:-1]) / 100.0)
else:
amount.append(float(value))
cap = Capacity(*amount)
return RateLimit(total=cap, callback=self._on_throttle)
|
Parse a 'throttle' statement and return a RateLimit
|
def _reset(self, force=False):
if not self._closed and (force or self._transaction):
try:
self.rollback()
except Exception:
pass
|
Reset a tough connection.
Rollback if forced or the connection was in a transaction.
|
def perimeter(self):
if self._is_completely_masked:
return np.nan * u.pix
else:
from skimage.measure import perimeter
return perimeter(~self._total_mask, neighbourhood=4) * u.pix
|
The total perimeter of the source segment, approximated as lines
through the centers of the border pixels using 4-connectivity.
If any masked pixels make holes within the source segment, then
the perimeter around the inner hole (e.g. an annulus) will also
contribute to the total perimeter.
|
def make_json_formatter(graph):
return {
"()": graph.config.logging.json_formatter.formatter,
"fmt": graph.config.logging.json_required_keys,
}
|
Create the default json formatter.
|
def vault_file(env, default):
home = os.environ['HOME'] if 'HOME' in os.environ else \
os.environ['USERPROFILE']
filename = os.environ.get(env, os.path.join(home, default))
filename = abspath(filename)
if os.path.exists(filename):
return filename
return None
|
The path to a miscellaneous Vault file.
This function checks for the env override on a file
path, computes a fully qualified, OS-appropriate path to
the desired file, and returns it if it exists. Otherwise
returns None.
|
def parse_routing_info(cls, records):
if len(records) != 1:
raise RoutingProtocolError("Expected exactly one record")
record = records[0]
routers = []
readers = []
writers = []
try:
servers = record["servers"]
for server in servers:
role = server["role"]
addresses = []
for address in server["addresses"]:
addresses.append(SocketAddress.parse(address, DEFAULT_PORT))
if role == "ROUTE":
routers.extend(addresses)
elif role == "READ":
readers.extend(addresses)
elif role == "WRITE":
writers.extend(addresses)
ttl = record["ttl"]
except (KeyError, TypeError):
raise RoutingProtocolError("Cannot parse routing info")
else:
return cls(routers, readers, writers, ttl)
|
Parse the records returned from a getServers call and
return a new RoutingTable instance.
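The record shape the parser expects, reconstructed from the key accesses above; the addresses, TTL, and the RoutingTable class name used here are illustrative assumptions:
records = [{
    "ttl": 300,  # seconds the routing table remains valid
    "servers": [
        {"role": "ROUTE", "addresses": ["core-0.example.com:7687"]},
        {"role": "READ",  "addresses": ["replica-0.example.com:7687"]},
        {"role": "WRITE", "addresses": ["core-0.example.com:7687"]},
    ],
}]
table = RoutingTable.parse_routing_info(records)  # routers/readers/writers + ttl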
|
def average_sharded_losses(sharded_losses):
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
|
Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
|
def load_scene(self, item):
scene = Scene.from_config(self.pyvlx, item)
self.add(scene)
|
Load scene from json.
|
def get_errors(self):
return [{cr.component_name: cr.get_error()}
for cr in self.component_results if cr.has_error()]
|
If there were any business errors fetching data for this property,
returns the error messages.
Returns:
list of dicts mapping each failing component name to its error
message (an empty list if there were no errors).
|
def delete_multireddit(self, name, *args, **kwargs):
url = self.config['multireddit_about'].format(user=self.user.name,
multi=name)
if not self._use_oauth:
self.http.headers['x-modhash'] = self.modhash
try:
self.request(url, data={}, method='DELETE', *args, **kwargs)
finally:
if not self._use_oauth:
del self.http.headers['x-modhash']
|
Delete a Multireddit.
Any additional parameters are passed directly into
:meth:`~praw.__init__.BaseReddit.request`
|
def update(self):
args = {attr: getattr(self, attr) for attr in self.to_update}
_perform_command(self, 'user_update', args)
|
Update the user's details on Todoist.
This method must be called to register any local attribute changes
with Todoist.
>>> from pytodoist import todoist
>>> user = todoist.login('john.doe@gmail.com', 'password')
>>> user.full_name = 'John Smith'
>>> # At this point Todoist still thinks the name is 'John Doe'.
>>> user.update()
>>> # Now the name has been updated on Todoist.
|
def enable_key(self):
print("This command will enable a disabled key.")
apiKeyID = input("API Key ID: ")
try:
key = self._curl_bitmex("/apiKey/enable",
postdict={"apiKeyID": apiKeyID})
print("Key with ID %s enabled." % key["id"])
except:
print("Unable to enable key, please try again.")
self.enable_key()
|
Enable an existing API Key.
|
def upload_keys(self, device_keys=None, one_time_keys=None):
content = {}
if device_keys:
content["device_keys"] = device_keys
if one_time_keys:
content["one_time_keys"] = one_time_keys
return self._send("POST", "/keys/upload", content=content)
|
Publishes end-to-end encryption keys for the device.
Said device must be the one used when logging in.
Args:
device_keys (dict): Optional. Identity keys for the device. The required
keys are:
| user_id (str): The ID of the user the device belongs to. Must match
the user ID used when logging in.
| device_id (str): The ID of the device these keys belong to. Must match
the device ID used when logging in.
| algorithms (list<str>): The encryption algorithms supported by this
device.
| keys (dict): Public identity keys. Should be formatted as
<algorithm:device_id>: <key>.
| signatures (dict): Signatures for the device key object. Should be
formatted as <user_id>: {<algorithm:device_id>: <key>}
one_time_keys (dict): Optional. One-time public keys. Should be
formatted as <algorithm:key_id>: <key>, the key format being
determined by the algorithm.
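A sketch of the payload shapes described above; the identifiers and key material are placeholders, not real keys:
device_keys = {
    "user_id": "@alice:matrix.org",
    "device_id": "ABCDEFGH",
    "algorithms": ["m.olm.v1.curve25519-aes-sha2", "m.megolm.v1.aes-sha2"],
    "keys": {
        "curve25519:ABCDEFGH": "<identity key>",
        "ed25519:ABCDEFGH": "<fingerprint key>",
    },
    "signatures": {
        "@alice:matrix.org": {"ed25519:ABCDEFGH": "<signature>"},
    },
}
one_time_keys = {"signed_curve25519:AAAAHQ": "<one-time key>"}
api.upload_keys(device_keys=device_keys, one_time_keys=one_time_keys)  # api: the client instance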
|
def form(cls, name, type_=Type.String, description=None, required=None, default=None,
minimum=None, maximum=None, enum=None, **options):
if minimum is not None and maximum is not None and minimum > maximum:
raise ValueError("Minimum must be less than or equal to the maximum.")
return cls(name, In.Form, type_, None, description,
required=required, default=default,
minimum=minimum, maximum=maximum,
enum=enum, **options)
|
Define form parameter.
|
def answer_approval(self, issue_id_or_key, approval_id, decision):
url = 'rest/servicedeskapi/request/{0}/approval/{1}'.format(issue_id_or_key, approval_id)
data = {'decision': decision}
return self.post(url, headers=self.experimental_headers, data=data)
|
Answer a pending approval
:param issue_id_or_key: str
:param approval_id: str
:param decision: str
:return:
|
def html_to_text(html, base_url='', bodywidth=CONFIG_DEFAULT):
def _patched_handle_charref(c):
self = h
charref = self.charref(c)
if self.code or self.pre:
charref = cgi.escape(charref)
self.o(charref, 1)
def _patched_handle_entityref(c):
self = h
entityref = self.entityref(c)
if self.code or self.pre:
entityref = cgi.escape(entityref)
self.o(entityref, 1)
h = HTML2Text(baseurl=base_url, bodywidth=config.BODY_WIDTH if bodywidth is CONFIG_DEFAULT else bodywidth)
h.handle_entityref = _patched_handle_entityref
h.handle_charref = _patched_handle_charref
return h.handle(html).rstrip()
|
Convert an HTML message to plain text.
|
def importTTX(self):
import os
import re
prefix = "com.github.fonttools.ttx"
sfntVersionRE = re.compile('(^<ttFont\s+)(sfntVersion=".*"\s+)(.*>$)',
flags=re.MULTILINE)
if not hasattr(self.ufo, "data"):
return
if not self.ufo.data.fileNames:
return
for path in self.ufo.data.fileNames:
foldername, filename = os.path.split(path)
if (foldername == prefix and filename.endswith(".ttx")):
ttx = self.ufo.data[path].decode('utf-8')
ttx = sfntVersionRE.sub(r'\1\3', ttx)
fp = BytesIO(ttx.encode('utf-8'))
self.otf.importXML(fp)
|
Merge TTX files from data directory "com.github.fonttools.ttx"
**This should not be called externally.** Subclasses
may override this method to handle the bounds creation
in a different way if desired.
|
def FoldValue(self, value):
if value is False and self._data_type_definition.false_value is not None:
return self._data_type_definition.false_value
if value is True and self._data_type_definition.true_value is not None:
return self._data_type_definition.true_value
raise ValueError('No matching True and False values')
|
Folds the data type into a value.
Args:
value (object): value.
Returns:
object: folded value.
Raises:
ValueError: if the data type definition cannot be folded into the value.
|
def bounding_ellipses(self):
if (self.linear_growth):
a1 = self.sma - self.astep / 2.
a2 = self.sma + self.astep / 2.
else:
a1 = self.sma * (1. - self.astep / 2.)
a2 = self.sma * (1. + self.astep / 2.)
return a1, a2
|
Compute the semimajor axis of the two ellipses that bound the
annulus where integrations take place.
Returns
-------
sma1, sma2 : float
The smaller and larger values of semimajor axis length that
define the annulus bounding ellipses.
|
def vectorize_utterance_ohe(self, utterance):
for i, word in enumerate(utterance):
if word not in self.vocab_list:
utterance[i] = '<unk>'
ie_utterance = self.swap_pad_and_zero(self.ie.transform(utterance))
ohe_utterance = np.array(self.ohe.transform(ie_utterance.reshape(len(ie_utterance), 1)))
return ohe_utterance
|
Take in a tokenized utterance and transform it into a sequence of one-hot vectors
|
def add_data(self, rawdata):
for data in rawdata:
try:
item = data[0]
if item[0] == 2:
continue
if item[0] != 0:
warnings.warn(f"Unknown message type '{item[0]}'", Warning)
continue
item = item[1]
target = str(item[0])
try:
data = item[1]
except IndexError:
data = dict()
try:
method = getattr(self, self.__head + target)
method(data)
except AttributeError:
self._handle_unhandled(target, data)
except IndexError:
LOGGER.warning("Wrongly constructed message received: %r", data)
self.conn.process_queues()
|
Add data to given room's state
|
def get_high_water_mark(self, mark_type, obstory_name=None):
if obstory_name is None:
obstory_name = self.obstory_name
obstory = self.get_obstory_from_name(obstory_name)
key_id = self.get_hwm_key_id(mark_type)
self.con.execute('SELECT time FROM archive_highWaterMarks WHERE markType=%s AND observatoryId=%s',
(key_id, obstory['uid']))
results = self.con.fetchall()
if len(results) > 0:
return results[0]['time']
return None
|
Retrieves the high water mark for a given obstory, defaulting to the current installation ID
:param string mark_type:
The type of high water mark to retrieve
:param string obstory_name:
The obstory ID to check for, or the default installation ID if not specified
:return:
A UTC datetime for the high water mark, or None if none was found.
|
def active_serving_watcher(backend, kitchen, period):
err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
if use_kitchen is None:
raise click.ClickException(err_str)
click.secho('%s - Watching Active OrderRun Changes in Kitchen %s' % (get_datetime(), use_kitchen), fg='green')
DKCloudCommandRunner.watch_active_servings(backend.dki, use_kitchen, period)
while True:
try:
DKCloudCommandRunner.join_active_serving_watcher_thread_join()
if not DKCloudCommandRunner.watcher_running():
break
except KeyboardInterrupt:
print 'KeyboardInterrupt'
exit_gracefully(None, None)
exit(0)
|
Watches all cooking Recipes in a Kitchen
Provide the kitchen name as an argument or be in a Kitchen folder.
|
def register_phonon_task(self, *args, **kwargs):
kwargs["task_class"] = PhononTask
return self.register_task(*args, **kwargs)
|
Register a phonon task.
|
def set_blocking(fd, blocking=True):
old_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
if blocking:
new_flag = old_flag & ~ os.O_NONBLOCK
else:
new_flag = old_flag | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, new_flag)
return not bool(old_flag & os.O_NONBLOCK)
|
Set the given file-descriptor blocking or non-blocking.
Returns the original blocking status.
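A minimal usage sketch on a pipe (POSIX only, since the function relies on fcntl):
import os

r, w = os.pipe()
was_blocking = set_blocking(r, blocking=False)  # switch the read end to non-blocking
assert was_blocking                             # pipes start out blocking
try:
    os.read(r, 1)                               # nothing has been written yet
except BlockingIOError:
    print('read would block instead of hanging')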
|
def posttrans_hook(conduit):
if 'SALT_RUNNING' not in os.environ:
with open(CK_PATH, 'w') as ck_fh:
ck_fh.write('{chksum} {mtime}\n'.format(chksum=_get_checksum(), mtime=_get_mtime()))
|
Hook after the package installation transaction.
:param conduit:
:return:
|
def create_magic_packet(macaddress):
if len(macaddress) == 12:
pass
elif len(macaddress) == 17:
sep = macaddress[2]
macaddress = macaddress.replace(sep, '')
else:
raise ValueError('Incorrect MAC address format')
data = b'FFFFFFFFFFFF' + (macaddress * 16).encode()
send_data = b''
for i in range(0, len(data), 2):
send_data += struct.pack(b'B', int(data[i: i + 2], 16))
return send_data
|
Create a magic packet.
A magic packet is a packet that can be used with the Wake-on-LAN
protocol to wake up a computer. The packet is constructed from the
mac address given as a parameter.
Args:
macaddress (str): the mac address that should be parsed into a
magic packet.
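A short usage sketch showing both accepted MAC formats and one way the packet might be sent; the broadcast address and port are assumptions about the target network:
import socket
import struct  # also needed by create_magic_packet itself

packet = create_magic_packet('aa:bb:cc:dd:ee:ff')  # separator form (17 chars)
packet = create_magic_packet('aabbccddeeff')       # bare form (12 hex digits)
assert len(packet) == 102  # 6 bytes of 0xFF + the MAC repeated 16 times

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(packet, ('255.255.255.255', 9))  # port 9 is commonly used for WoL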
|
def parse_chains(data):
chains = odict()
for line in data.splitlines(True):
m = re_chain.match(line)
if m:
policy = None
if m.group(2) != '-':
policy = m.group(2)
chains[m.group(1)] = {
'policy': policy,
'packets': int(m.group(3)),
'bytes': int(m.group(4)),
}
return chains
|
Parse the chain definitions.
|
def stress(ref_cds, est_cds):
ref_dists = pdist(ref_cds)
est_dists = pdist(est_cds)
return np.sqrt(((ref_dists - est_dists)**2).sum() / (ref_dists**2).sum())
|
Kruskal's stress
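Written out, the value computed above is Kruskal's stress-1; a sketch of the formula implied by the code, with d_ij the reference pairwise distances and \hat{d}_ij the estimated ones:
\[
\mathrm{stress} = \sqrt{\frac{\sum_{i<j}\bigl(d_{ij} - \hat{d}_{ij}\bigr)^2}{\sum_{i<j} d_{ij}^2}}
\]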
|
def response(self, component_id, component=None, **kwargs):
if component_id in self._responses:
raise DuplicateComponentNameError(
'Another response with name "{}" is already registered.'.format(
component_id
)
)
component = component or {}
ret = component.copy()
for plugin in self._plugins:
try:
ret.update(plugin.response_helper(component, **kwargs) or {})
except PluginMethodNotImplementedError:
continue
self._responses[component_id] = ret
return self
|
Add a response which can be referenced.
:param str component_id: ref_id to use as reference
:param dict component: response fields
:param dict kwargs: plugin-specific arguments
|
def DomainFactory(domain_name, cmds):
klass = type(str(domain_name), (BaseDomain,), {})
for c in cmds:
command = get_command(domain_name, c['name'])
setattr(klass, c['name'], classmethod(command))
return klass
|
Dynamically create a Domain class and set its methods.
|
def zpopmax(self, name, count=None):
args = (count is not None) and [count] or []
options = {
'withscores': True
}
return self.execute_command('ZPOPMAX', name, *args, **options)
|
Remove and return up to ``count`` members with the highest scores
from the sorted set ``name``.
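A usage sketch against a redis-py style client that exposes this method; the key and members are made up:
r.zadd('scores', {'alice': 10, 'bob': 25, 'carol': 17})
r.zpopmax('scores')           # [(b'bob', 25.0)]  - single highest-scoring member
r.zpopmax('scores', count=2)  # [(b'carol', 17.0), (b'alice', 10.0)]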
|
async def restart(request):
def wait_and_restart():
log.info('Restarting server')
sleep(1)
os.system('kill 1')
Thread(target=wait_and_restart).start()
return web.json_response({"message": "restarting"})
|
Returns OK, then waits approximately 1 second and restarts container
|
def com_google_fonts_check_glyf_unused_data(ttFont):
try:
expected_glyphs = len(ttFont.getGlyphOrder())
actual_glyphs = len(ttFont['glyf'].glyphs)
diff = actual_glyphs - expected_glyphs
if diff < 0:
yield FAIL, Message("unreachable-data",
("Glyf table has unreachable data at the end of "
" the table. Expected glyf table length {}"
" (from loca table), got length"
" {} (difference: {})").format(
expected_glyphs, actual_glyphs, diff))
elif not diff:
yield PASS, "There is no unused data at the end of the glyf table."
else:
raise Exception("Bug: fontTools did not raise an expected exception.")
except fontTools.ttLib.TTLibError as error:
if "not enough 'glyf' table data" in format(error):
yield FAIL, Message("missing-data",
("Loca table references data beyond"
" the end of the glyf table."
" Expected glyf table length {}"
" (from loca table).").format(expected_glyphs))
else:
raise Exception("Bug: Unexpected fontTools exception.")
|
Is there any unused data at the end of the glyf table?
|
def create_with_dst_resource_provisioning(
cls, cli, src_resource_id, dst_resource_config,
max_time_out_of_sync, name=None, remote_system=None,
src_spa_interface=None, src_spb_interface=None,
dst_spa_interface=None, dst_spb_interface=None,
dst_resource_element_configs=None, auto_initiate=None,
hourly_snap_replication_policy=None,
daily_snap_replication_policy=None, replicate_existing_snaps=None):
req_body = cli.make_body(
srcResourceId=src_resource_id,
dstResourceConfig=dst_resource_config,
maxTimeOutOfSync=max_time_out_of_sync,
name=name, remoteSystem=remote_system,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface,
dstResourceElementConfigs=dst_resource_element_configs,
autoInitiate=auto_initiate,
hourlySnapReplicationPolicy=hourly_snap_replication_policy,
dailySnapReplicationPolicy=daily_snap_replication_policy,
replicateExistingSnaps=replicate_existing_snaps)
resp = cli.type_action(
cls().resource_class,
'createReplicationSessionWDestResProvisioning',
**req_body)
resp.raise_if_err()
session_resp = resp.first_content
return cls.get(cli, _id=session_resp['id'])
|
Create a replication session along with destination resource
provisioning.
:param cli: the rest cli.
:param src_resource_id: id of the replication source, could be
lun/fs/cg.
:param dst_resource_config: `UnityResourceConfig` object. The user
chosen config for destination resource provisioning. `pool_id` and
`size` are required for creation.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param name: name of the replication.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param src_spa_interface: `UnityRemoteInterface` object. The
replication interface for source SPA.
:param src_spb_interface: `UnityRemoteInterface` object. The
replication interface for source SPB.
:param dst_spa_interface: `UnityRemoteInterface` object. The
replication interface for destination SPA.
:param dst_spb_interface: `UnityRemoteInterface` object. The
replication interface for destination SPB.
:param dst_resource_element_configs: List of `UnityResourceConfig`
objects. The user chose config for each of the member element of
the destination resource.
:param auto_initiate: indicates whether to perform the first
replication sync automatically.
True - perform the first replication sync automatically.
False - perform the first replication sync manually.
:param hourly_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating hourly scheduled snaps of the
source resource.
:param daily_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating daily scheduled snaps of the
source resource.
:param replicate_existing_snaps: indicates whether or not to replicate
snapshots already existing on the resource.
:return: the newly created replication session.
|
def render_tree(root, child_func, prune=0, margin=[0], visited=None):
rname = str(root)
if visited is None:
visited = {}
children = child_func(root)
retval = ""
for pipe in margin[:-1]:
if pipe:
retval = retval + "| "
else:
retval = retval + " "
if rname in visited:
return retval + "+-[" + rname + "]\n"
retval = retval + "+-" + rname + "\n"
if not prune:
visited = copy.copy(visited)
visited[rname] = 1
for i in range(len(children)):
margin.append(i < len(children)-1)
retval = retval + render_tree(children[i], child_func, prune, margin, visited)
margin.pop()
return retval
|
Render a tree of nodes into an ASCII tree view.
:Parameters:
- `root`: the root node of the tree
- `child_func`: the function called to get the children of a node
- `prune`: don't visit the same node twice
- `margin`: the format of the left margin to use for children of root. 1 results in a pipe, and 0 results in no pipe.
- `visited`: a dictionary of visited nodes in the current branch if not prune, or in the whole tree if prune.
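A small self-contained sketch driving the renderer with a plain dict as the tree; the node names and child lookup are invented:
import copy  # used by render_tree when prune is 0

tree = {'root': ['a', 'b'], 'a': ['a1', 'a2'], 'a1': [], 'a2': [], 'b': []}
print(render_tree('root', lambda node: tree[node]))
# +-root
#   +-a
#   | +-a1
#   | +-a2
#   +-b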
|
def setups(self):
result = []
has_options = self.base_object.is_optionhandler
enm = javabridge.get_enumeration_wrapper(javabridge.call(self.jobject, "setups", "()Ljava/util/Enumeration;"))
while enm.hasMoreElements():
if has_options:
result.append(OptionHandler(enm.nextElement()))
else:
result.append(JavaObject(enm.nextElement()))
return result
|
Generates and returns all the setups according to the parameter search space.
:return: the list of configured objects (of type JavaObject)
:rtype: list
|
def solution_violations(solution, events, slots):
array = converter.solution_to_array(solution, events, slots)
return array_violations(array, events, slots)
|
Take a solution and return a list of violated constraints
Parameters
----------
solution: list or tuple
a schedule in solution form
events : list or tuple
of resources.Event instances
slots : list or tuple
of resources.Slot instances
Returns
-------
Generator
of strings indicating the nature of the violated
constraints
|
def temp_path(file_name=None):
if file_name is None:
file_name = generate_timestamped_string("wtf_temp_file")
return os.path.join(tempfile.gettempdir(), file_name)
|
Gets a temp path.
Kwargs:
file_name (str) : if file name is specified, it gets appended to the temp dir.
Usage::
temp_file_path = temp_path("myfile")
copyfile("myfile", temp_file_path) # copies 'myfile' to '/tmp/myfile'
|
def flush(self):
for key in self.grouping_info.keys():
if self._should_flush(key):
self._write_current_buffer_for_group_key(key)
|
Ensure all remaining buffers are written.
|
def screenshot(self):
b64data = self.http.get('/screenshot').value
raw_data = base64.b64decode(b64data)
from PIL import Image
buff = io.BytesIO(raw_data)
return Image.open(buff)
|
Take screenshot with session check
Returns:
PIL.Image
|
def load(path, service=None, hostport=None, module_name=None):
if not path.endswith('.thrift'):
service, path = path, service
module = thriftrw.load(path=path, name=module_name)
return TChannelThriftModule(service, module, hostport)
|
Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
The returned module contains, one top-level type for each struct, enum,
union, exception, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
path or a path relative to the *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
donuts = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file.
|
def fix_facets(self):
facets = self.facets
for key in list(facets.keys()):
_type = facets[key].get("_type", "unknown")
if _type == "date_histogram":
for entry in facets[key].get("entries", []):
for k, v in list(entry.items()):
if k in ["count", "max", "min", "total_count", "mean", "total"]:
continue
if not isinstance(entry[k], datetime):
entry[k] = datetime.utcfromtimestamp(v / 1e3)
|
This function converts date_histogram facet timestamps to datetime objects.
|
def convert_units_to_base_units(units):
total_factor = 1
new_units = []
for unit in units:
if unit not in BASE_UNIT_CONVERSIONS:
continue
factor, new_unit = BASE_UNIT_CONVERSIONS[unit]
total_factor *= factor
new_units.append(new_unit)
new_units.sort()
return total_factor, tuple(new_units)
|
Convert a set of units into a set of "base" units.
Returns a 2-tuple of `factor, new_units`.
|