| code | docstring |
|---|---|
def convolve(signal, kernel):
    pad = np.ones(len(kernel) // 2)
    signal = np.concatenate((pad * signal[0], signal, pad * signal[-1]))
    signal = np.convolve(signal, kernel, mode='same')
    signal = signal[len(pad):-len(pad)]
    return signal
|
This applies a kernel to a signal through convolution and returns the result.
Some magic is done at the edges so the result doesn't approach zero:
1. extend the signal's edges with len(kernel)/2 duplicated values
2. perform the convolution ('same' mode)
3. slice-off the ends we added
4. return the same number of points as the original
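Illustrative usage, not part of the original source (assumes ``np`` is NumPy and the kernel is normalized so the signal amplitude is preserved)::
>>> signal = np.sin(np.linspace(0, 4 * np.pi, 200))
>>> kernel = np.ones(11) / 11.0
>>> smoothed = convolve(signal, kernel)
>>> smoothed.shape == signal.shape
True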
|
def gauges(parser, token):
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return GaugesNode()
|
Gaug.es template tag.
Renders the JavaScript code for gaug.es tracking. You must supply
your Site ID account number in the ``GAUGES_SITE_ID``
setting.
|
def generate_eb_lightcurve(
times,
mags=None,
errs=None,
paramdists={'period':sps.uniform(loc=0.2,scale=99.8),
'pdepth':sps.uniform(loc=1.0e-4,scale=0.7),
'pduration':sps.uniform(loc=0.01,scale=0.44),
'depthratio':sps.uniform(loc=0.01,scale=0.99),
'secphase':sps.norm(loc=0.5,scale=0.1)},
magsarefluxes=False,
):
if mags is None:
mags = np.full_like(times, 0.0)
if errs is None:
errs = np.full_like(times, 0.0)
epoch = npr.random()*(times.max() - times.min()) + times.min()
period = paramdists['period'].rvs(size=1)
pdepth = paramdists['pdepth'].rvs(size=1)
pduration = paramdists['pduration'].rvs(size=1)
depthratio = paramdists['depthratio'].rvs(size=1)
secphase = paramdists['secphase'].rvs(size=1)
if magsarefluxes and pdepth < 0.0:
pdepth = -pdepth
elif not magsarefluxes and pdepth > 0.0:
pdepth = -pdepth
modelmags, phase, ptimes, pmags, perrs = (
eclipses.invgauss_eclipses_func([period, epoch, pdepth,
pduration, depthratio, secphase],
times,
mags,
errs)
)
timeind = np.argsort(ptimes)
mtimes = ptimes[timeind]
mmags = modelmags[timeind]
merrs = perrs[timeind]
modeldict = {
'vartype':'EB',
'params':{x:np.asscalar(y) for x,y in zip(['period',
'epoch',
'pdepth',
'pduration',
'depthratio'],
[period,
epoch,
pdepth,
pduration,
depthratio])},
'times':mtimes,
'mags':mmags,
'errs':merrs,
'varperiod':period,
'varamplitude':pdepth,
}
return modeldict
|
This generates fake EB light curves.
Parameters
----------
times : np.array
This is an array of time values that will be used as the time base.
mags,errs : np.array
These arrays will have the model added to them. If either is
None, `np.full_like(times, 0.0)` will be used as a substitute and the model
light curve will be centered around 0.0.
paramdists : dict
This is a dict containing parameter distributions to use for the
model params, containing the following keys ::
{'period', 'pdepth', 'pduration', 'depthratio', 'secphase'}
The values of these keys should all be 'frozen' scipy.stats distribution
objects, e.g.:
https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions
The variability epoch will be automatically chosen from a uniform
distribution between `times.min()` and `times.max()`.
The `pdepth` will be flipped automatically as appropriate if
`magsarefluxes=True`.
magsarefluxes : bool
If the generated time series is meant to be a flux time-series, set this
to True to get the correct sign of variability amplitude.
Returns
-------
dict
A dict of the form below is returned::
{'vartype': 'EB',
'params': {'period': generated value of period,
'epoch': generated value of epoch,
'pdepth': generated value of primary eclipse depth,
'pduration': generated value of primary eclipse duration,
'depthratio': generated value of prim/sec eclipse
depth ratio},
'times': the model times,
'mags': the model mags,
'errs': the model errs,
'varperiod': the generated period of variability == 'period'
'varamplitude': the generated amplitude of
variability == 'pdepth'}
|
def compose_err_msg(msg, **kwargs):
updated_msg = msg
for k, v in sorted(kwargs.items()):
if isinstance(v, _basestring):
updated_msg += "\n" + k + ": " + v
return updated_msg
|
Append key-value pairs to msg, for display.
Parameters
----------
msg: string
arbitrary message
kwargs: dict
arbitrary dictionary
Returns
-------
updated_msg: string
msg, with "key: value" appended. Only string values are appended.
Example
-------
>>> compose_err_msg('Error message with arguments...', arg_num=123, \
arg_str='filename.nii', arg_bool=True)
'Error message with arguments...\\narg_str: filename.nii'
>>>
|
def colormagdiagram_cplist(cplist,
outpkl,
color_mag1=['gaiamag','sdssg'],
color_mag2=['kmag','kmag'],
yaxis_mag=['gaia_absmag','rpmj']):
cplist_objectids = []
cplist_mags = []
cplist_colors = []
for cpf in cplist:
cpd = _read_checkplot_picklefile(cpf)
cplist_objectids.append(cpd['objectid'])
thiscp_mags = []
thiscp_colors = []
for cm1, cm2, ym in zip(color_mag1, color_mag2, yaxis_mag):
if (ym in cpd['objectinfo'] and
cpd['objectinfo'][ym] is not None):
thiscp_mags.append(cpd['objectinfo'][ym])
else:
thiscp_mags.append(np.nan)
if (cm1 in cpd['objectinfo'] and
cpd['objectinfo'][cm1] is not None and
cm2 in cpd['objectinfo'] and
cpd['objectinfo'][cm2] is not None):
thiscp_colors.append(cpd['objectinfo'][cm1] -
cpd['objectinfo'][cm2])
else:
thiscp_colors.append(np.nan)
cplist_mags.append(thiscp_mags)
cplist_colors.append(thiscp_colors)
cplist_objectids = np.array(cplist_objectids)
cplist_mags = np.array(cplist_mags)
cplist_colors = np.array(cplist_colors)
cmddict = {'objectids':cplist_objectids,
'mags':cplist_mags,
'colors':cplist_colors,
'color_mag1':color_mag1,
'color_mag2':color_mag2,
'yaxis_mag':yaxis_mag}
with open(outpkl,'wb') as outfd:
pickle.dump(cmddict, outfd, pickle.HIGHEST_PROTOCOL)
plt.close('all')
return cmddict
|
This makes color-mag diagrams for all checkplot pickles in the provided
list.
Can make an arbitrary number of CMDs given lists of x-axis colors and y-axis
mags to use.
Parameters
----------
cplist : list of str
This is the list of checkplot pickles to process.
outpkl : str
The filename of the output pickle that will contain the color-mag
information for all objects in the checkplots specified in `cplist`.
color_mag1 : list of str
This is a list of the keys in each checkplot's `objectinfo` dict that will
be used as color_1 in the equation::
x-axis color = color_mag1 - color_mag2
color_mag2 : list of str
This is a list of the keys in each checkplot's `objectinfo` dict that will
be used as color_2 in the equation::
x-axis color = color_mag1 - color_mag2
yaxis_mag : list of str
This is a list of the keys in each checkplot's `objectinfo` dict that
will be used as the (absolute) magnitude y-axis of the color-mag
diagrams.
Returns
-------
dict
The CMD dict generated for the collection of objects in the input
checkplot list. This dict is also written to the output pickle `outpkl`.
Notes
-----
This can make many CMDs in one go. For example, the default kwargs for
`color_mag`, `color_mag2`, and `yaxis_mag` result in two CMDs generated and
written to the output pickle file:
- CMD1 -> gaiamag - kmag on the x-axis vs gaia_absmag on the y-axis
- CMD2 -> sdssg - kmag on the x-axis vs rpmj (J reduced PM) on the y-axis
|
def is_before(self, other):
if type(self.val) is not datetime.datetime:
raise TypeError('val must be datetime, but was type <%s>' % type(self.val).__name__)
if type(other) is not datetime.datetime:
raise TypeError('given arg must be datetime, but was type <%s>' % type(other).__name__)
if self.val >= other:
self._err('Expected <%s> to be before <%s>, but was not.' % (self.val.strftime('%Y-%m-%d %H:%M:%S'), other.strftime('%Y-%m-%d %H:%M:%S')))
return self
|
Asserts that val is a date and is before other date.
|
def is_suspicious(self, result=None):
result = result if result is not None else self._last_result
suspicious = False
if result is not None:
suspicious = True if result['type'] > 0 else False
return suspicious
|
Check if IP is suspicious
:param result: httpBL results; if None, then results from last check_ip() used (optional)
:return: True or False
|
def handle_got_features_event(self, event):
server_features = set()
logger.debug("Checking roster-related features")
if event.features.find(FEATURE_ROSTERVER) is not None:
logger.debug(" Roster versioning available")
server_features.add("versioning")
if event.features.find(FEATURE_APPROVALS) is not None:
logger.debug(" Subscription pre-approvals available")
server_features.add("pre-approvals")
self.server_features = server_features
|
Check for roster related features in the stream features received
and set `server_features` accordingly.
|
def submit_tar(cl_args, unknown_args, tmp_dir):
topology_file = cl_args['topology-file-name']
java_defines = cl_args['topology_main_jvm_property']
main_class = cl_args['topology-class-name']
res = execute.heron_tar(
main_class,
topology_file,
tuple(unknown_args),
tmp_dir,
java_defines)
result.render(res)
if not result.is_successful(res):
err_context = ("Failed to create topology definition " \
"file when executing class '%s' of file '%s'") % (main_class, topology_file)
res.add_context(err_context)
return res
return launch_topologies(cl_args, topology_file, tmp_dir)
|
Extract and execute the java files inside the tar and then add topology
definition file created by running submitTopology
We use the packer to make a package for the tar and dump it
to a well-known location. We then run the main method of class
with the specified arguments. We pass arguments as an environment variable HERON_OPTIONS.
This will run the jar file with the topology class name.
The submitter inside will write out the topology defn file to a location
that we specify. Then we write the topology defn file to a well known
packer location. We then write to appropriate places in zookeeper
and launch the aurora jobs
:param cl_args:
:param unknown_args:
:param tmp_dir:
:return:
|
def pix_to_sub(self):
pix_to_sub = [[] for _ in range(self.pixels)]
for regular_pixel, pix_pixel in enumerate(self.sub_to_pix):
pix_to_sub[pix_pixel].append(regular_pixel)
return pix_to_sub
|
Compute the mappings between a pixelization's pixels and the unmasked sub-grid pixels. These mappings \
are determined after the regular-grid is used to determine the pixelization.
The pixelization's pixels map to different numbers of sub-grid pixels, thus a list of lists is used to \
represent these mappings
|
def bind(self, stream, resource):
self.stream = stream
stanza = Iq(stanza_type = "set")
payload = ResourceBindingPayload(resource = resource)
stanza.set_payload(payload)
self.stanza_processor.set_response_handlers(stanza,
self._bind_success, self._bind_error)
stream.send(stanza)
stream.event(BindingResourceEvent(resource))
|
Bind to a resource.
[initiating entity only]
:Parameters:
- `resource`: the resource name to bind to.
:Types:
- `resource`: `unicode`
XMPP stream is authenticated for bare JID only. To use
the full JID it must be bound to a resource.
|
def parse_header(head):
try:
(fromcall, path) = head.split('>', 1)
except:
raise ParseError("invalid packet header")
if (not 1 <= len(fromcall) <= 9 or
not re.findall(r"^[a-z0-9]{0,9}(\-[a-z0-9]{1,8})?$", fromcall, re.I)):
raise ParseError("fromcallsign is invalid")
path = path.split(',')
if len(path[0]) == 0:
raise ParseError("no tocallsign in header")
tocall = path[0]
path = path[1:]
validate_callsign(tocall, "tocallsign")
for digi in path:
if not re.findall(r"^[A-Z0-9\-]{1,9}\*?$", digi, re.I):
raise ParseError("invalid callsign in path")
parsed = {
'from': fromcall,
'to': tocall,
'path': path,
}
viacall = ""
if len(path) >= 2 and re.match(r"^q..$", path[-2]):
viacall = path[-1]
parsed.update({'via': viacall})
return parsed
|
Parses the header part of packet
Returns a dict
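A hypothetical example (callsigns are illustrative only; assumes ``validate_callsign`` accepts the tocallsign shown)::
>>> parse_header('N0CALL-9>APRS,WIDE1-1,WIDE2-1')
{'from': 'N0CALL-9', 'to': 'APRS', 'path': ['WIDE1-1', 'WIDE2-1'], 'via': ''}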
|
def write(self, path):
with open(path, "wb") as fout:
fout.write(self.m_buf)
|
Write buffer to file
|
def make_success_response(self, result):
response = self.make_response(constants.RESPONSE_STATUS_SUCCESS)
response[constants.RESPONSE_KEY_RESULT] = result
return response
|
Makes the python dict corresponding to the
JSON that needs to be sent for a successful
response. Result is the actual payload
that gets sent.
|
def to_internal_value(self, data):
if not isinstance(data, list):
message = self.error_messages['not_a_list'].format(
input_type=type(data).__name__
)
raise serializers.ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
})
ret = []
for item in data:
try:
validated = self.child.run_validation(item)
except serializers.ValidationError as exc:
ret.append(exc.detail)
else:
ret.append(validated)
return ret
|
This implements the same relevant logic as ListSerializer except that if one or more items fail validation,
processing for other items that did not fail will continue.
|
def SETA(cpu, dest):
dest.write(Operators.ITEBV(dest.size, Operators.OR(cpu.CF, cpu.ZF) == False, 1, 0))
|
Sets byte if above.
Sets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF) in the
EFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix
(cc) indicates the condition being tested for::
IF condition
THEN
DEST = 1;
ELSE
DEST = 0;
FI;
:param cpu: current CPU.
:param dest: destination operand.
|
def locked_get(self):
filters = {self.key_name: self.key_value}
query = self.session.query(self.model_class).filter_by(**filters)
entity = query.first()
if entity:
credential = getattr(entity, self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
return credential
else:
return None
|
Retrieve stored credential.
Returns:
A :class:`oauth2client.Credentials` instance or `None`.
|
def update_from_model_change(self, oldmodel, newmodel, tile):
self._loglikelihood -= self._calc_loglikelihood(oldmodel, tile=tile)
self._loglikelihood += self._calc_loglikelihood(newmodel, tile=tile)
self._residuals[tile.slicer] = self._data[tile.slicer] - newmodel
|
Update various internal variables from a model update from oldmodel to
newmodel for the tile `tile`
|
def write_boilerplate(name: str,
version: Optional[str] = None,
description: Optional[str] = None,
authors: Optional[str] = None,
contact: Optional[str] = None,
copyright: Optional[str] = None,
licenses: Optional[str] = None,
disclaimer: Optional[str] = None,
namespace_url: Optional[Mapping[str, str]] = None,
namespace_patterns: Optional[Mapping[str, str]] = None,
annotation_url: Optional[Mapping[str, str]] = None,
annotation_patterns: Optional[Mapping[str, str]] = None,
annotation_list: Optional[Mapping[str, Set[str]]] = None,
pmids: Optional[Iterable[Union[str, int]]] = None,
entrez_ids: Optional[Iterable[Union[str, int]]] = None,
file: Optional[TextIO] = None,
) -> None:
lines = make_knowledge_header(
name=name,
version=version or '1.0.0',
description=description,
authors=authors,
contact=contact,
copyright=copyright,
licenses=licenses,
disclaimer=disclaimer,
namespace_url=namespace_url,
namespace_patterns=namespace_patterns,
annotation_url=annotation_url,
annotation_patterns=annotation_patterns,
annotation_list=annotation_list,
)
for line in lines:
print(line, file=file)
if pmids is not None:
for line in make_pubmed_abstract_group(pmids):
print(line, file=file)
if entrez_ids is not None:
for line in make_pubmed_gene_group(entrez_ids):
print(line, file=file)
|
Write a boilerplate BEL document with standard document metadata and definitions.
:param name: The unique name for this BEL document
:param contact: The email address of the maintainer
:param description: A description of the contents of this document
:param authors: The authors of this document
:param version: The version. Defaults to ``1.0.0``.
:param copyright: Copyright information about this document
:param licenses: The license applied to this document
:param disclaimer: The disclaimer for this document
:param namespace_url: an optional dictionary of {str name: str URL} of namespaces
:param namespace_patterns: An optional dictionary of {str name: str regex} namespaces
:param annotation_url: An optional dictionary of {str name: str URL} of annotations
:param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations
:param annotation_list: An optional dictionary of {str name: set of names} of list annotations
:param pmids: A list of PubMed identifiers to auto-populate with citation and abstract
:param entrez_ids: A list of Entrez identifiers to autopopulate the gene summary as evidence
:param file: A writable file or file-like. If None, defaults to :data:`sys.stdout`
|
def output_is_valid(self, process_data):
if self.METADATA["data_type"] == "raster":
return (
is_numpy_or_masked_array(process_data) or
is_numpy_or_masked_array_with_tags(process_data)
)
elif self.METADATA["data_type"] == "vector":
return is_feature_list(process_data)
|
Check whether process output is allowed with output driver.
Parameters
----------
process_data : raw process output
Returns
-------
True or False
|
def get_equalisers(self):
if not self.__equalisers:
self.__equalisers = yield from self.handle_list(
self.API.get('equalisers'))
return self.__equalisers
|
Get the equaliser modes supported by this device.
|
def calculate_stats(data_list, stats_to_calculate=['mean', 'std'], percentiles_to_calculate=[]):
stats_to_numpy_method_map = {
'mean': numpy.mean,
'avg': numpy.mean,
'std': numpy.std,
'standard_deviation': numpy.std,
'median': numpy.median,
'min': numpy.amin,
'max': numpy.amax
}
calculated_stats = {}
calculated_percentiles = {}
if len(data_list) == 0:
return calculated_stats, calculated_percentiles
for stat in stats_to_calculate:
if stat in stats_to_numpy_method_map.keys():
calculated_stats[stat] = stats_to_numpy_method_map[stat](data_list)
else:
logger.error("Unsupported stat : " + str(stat))
for percentile in percentiles_to_calculate:
if isinstance(percentile, float) or isinstance(percentile, int):
calculated_percentiles[percentile] = numpy.percentile(data_list, percentile)
else:
logger.error("Unsupported percentile requested (should be int or float): " + str(percentile))
return calculated_stats, calculated_percentiles
|
Calculate statistics for given data.
:param list data_list: List of floats
:param list stats_to_calculate: List of strings with statistics to calculate. Supported stats are defined in constant stats_to_numpy_method_map
:param list percentiles_to_calculate: List of floats that define which percentiles to calculate.
:return: tuple of dictionaries containing calculated statistics and percentiles
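An illustrative call, not from the original source (assumes ``numpy`` and the module-level ``logger`` used by the function are available)::
>>> stats, pcts = calculate_stats([1.0, 2.0, 3.0, 4.0, 100.0], ['mean', 'median'], [50])
>>> stats == {'mean': 22.0, 'median': 3.0} and pcts == {50: 3.0}
True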
|
def get_start_date(self, obj):
obj_date = getattr(obj, self.get_date_field())
try:
obj_date = obj_date.date()
except AttributeError:
pass
return obj_date
|
Returns the start date for a model instance
|
def _run_io_threads(self, handler):
reader = ReadingThread(self.settings, handler, daemon = self.daemon,
exc_queue = self.exc_queue)
writter = WrittingThread(self.settings, handler, daemon = self.daemon,
exc_queue = self.exc_queue)
self.io_threads += [reader, writter]
reader.start()
writter.start()
|
Start threads for an IOHandler.
|
def read_bin_particle_density(self):
config = []
self.cnxn.xfer([0x33])
sleep(10e-3)
for i in range(4):
resp = self.cnxn.xfer([0x00])[0]
config.append(resp)
bpd = self._calculate_float(config)
return bpd
|
Read the bin particle density
:returns: float
|
def window_lanczos(N):
    if N == 1:
        return ones(1)
    n = linspace(-N/2., N/2., N)
    win = sinc(2*n/(N-1.))
    return win
|
r"""Lanczos window also known as sinc window.
:param N: window length
.. math:: w(n) = sinc \left( \frac{2n}{N-1} - 1 \right)
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'lanczos')
.. seealso:: :func:`create_window`, :class:`Window`
|
def validate_litezip(struct):
msgs = []
def _fmt_err(err):
return (Path(err.filename), "{}:{} -- {}: {}".format(*(err[1:])))
obj_by_type = {}
for obj in struct:
if not is_valid_identifier(obj.id):
msg = (obj.file.parent,
"{} is not a valid identifier".format(obj.id),)
logger.info("{}: {}".format(*msg))
msgs.append(msg)
obj_by_type.setdefault(type(obj), []).append(obj)
for obtype in obj_by_type:
content_msgs = list([_fmt_err(err) for err in
validate_content(*obj_by_type[obtype])])
for msg in content_msgs:
logger.info("{}: {}".format(*msg))
msgs.extend(content_msgs)
return msgs
|
Validate the given litezip as `struct`.
Returns a list of validation messages.
|
def peripheral_didUpdateValueForCharacteristic_error_(self, peripheral, characteristic, error):
logger.debug('peripheral_didUpdateValueForCharacteristic_error called')
if error is not None:
return
device = device_list().get(peripheral)
if device is not None:
device._characteristic_changed(characteristic)
|
Called when characteristic value was read or updated.
|
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
coincMatrix0 = SM32(int(nCoinc), int(length))
theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
for rowIdx in xrange(nCoinc):
coinc = numpy.array(random.sample(xrange(length),
activity), dtype=numpy.uint32)
coinc.sort()
coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
coincMatrix = SM32(int(nCoinc), int(length))
coincMatrix.initializeWithFixedNNZR(activity)
return coincMatrix0
|
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
|
def _current_color(self, which=0):
if which == 1:
color = self.colors['edge_loop_color']
elif which == 2:
color = self.colors['vertex_color']
else:
div = self.coloring_sensitivity * self.num_servers + 1.
tmp = 1. - min(self.num_system / div, 1)
if self.edge[0] == self.edge[1]:
color = [i * tmp for i in self.colors['vertex_fill_color']]
color[3] = 1.0
else:
color = [i * tmp for i in self.colors['edge_color']]
color[3] = 1 / 2.
return color
|
Returns a color for the queue.
Parameters
----------
which : int (optional, default: ``0``)
Specifies the type of color to return.
Returns
-------
color : list
Returns a RGBA color that is represented as a list with 4
entries where each entry can be any floating point number
between 0 and 1.
* If ``which`` is 1 then it returns the color of the edge
as if it were a self loop. This is specified in
``colors['edge_loop_color']``.
* If ``which`` is 2 then it returns the color of the vertex
pen color (defined as color/vertex_color in
:meth:`.QueueNetworkDiGraph.graph_draw`). This is
specified in ``colors['vertex_color']``.
* If ``which`` is anything else, then it returns a
shade of the edge that is proportional to the number of
agents in the system -- which includes those being
served and those waiting to be served. More agents
correspond to darker edge colors. Uses
``colors['vertex_fill_color']`` if the queue sits on a
loop, and ``colors['edge_color']`` otherwise.
|
def getaddrlist(self):
result = []
ad = self.getaddress()
while ad:
result += ad
ad = self.getaddress()
return result
|
Parse all addresses.
Returns a list containing all of the addresses.
|
def plot_holdings(returns, positions, legend_loc='best', ax=None, **kwargs):
if ax is None:
ax = plt.gca()
positions = positions.copy().drop('cash', axis='columns')
df_holdings = positions.replace(0, np.nan).count(axis=1)
df_holdings_by_month = df_holdings.resample('1M').mean()
df_holdings.plot(color='steelblue', alpha=0.6, lw=0.5, ax=ax, **kwargs)
df_holdings_by_month.plot(
color='orangered',
lw=2,
ax=ax,
**kwargs)
ax.axhline(
df_holdings.values.mean(),
color='steelblue',
ls='--',
lw=3)
ax.set_xlim((returns.index[0], returns.index[-1]))
leg = ax.legend(['Daily holdings',
'Average daily holdings, by month',
'Average daily holdings, overall'],
loc=legend_loc, frameon=True,
framealpha=0.5)
leg.get_frame().set_edgecolor('black')
ax.set_title('Total holdings')
ax.set_ylabel('Holdings')
ax.set_xlabel('')
return ax
|
Plots total amount of stocks with an active position, either short
or long. Displays daily total, daily average per month, and
all-time daily average.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
legend_loc : matplotlib.loc, optional
The location of the legend on the plot.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
|
def __build_problem(self):
prob = constraint_matrices(self.model, zero_tol=self.feasibility_tol)
equalities = prob.equalities
b = prob.b
bounds = np.atleast_2d(prob.bounds).T
var_bounds = np.atleast_2d(prob.variable_bounds).T
homogeneous = all(np.abs(b) < self.feasibility_tol)
fixed_non_zero = np.abs(prob.variable_bounds[:, 1]) > \
self.feasibility_tol
fixed_non_zero &= prob.variable_fixed
if any(fixed_non_zero):
n_fixed = fixed_non_zero.sum()
rows = np.zeros((n_fixed, prob.equalities.shape[1]))
rows[range(n_fixed), np.where(fixed_non_zero)] = 1.0
equalities = np.vstack([equalities, rows])
var_b = prob.variable_bounds[:, 1]
b = np.hstack([b, var_b[fixed_non_zero]])
homogeneous = False
nulls = nullspace(equalities)
return Problem(
equalities=shared_np_array(equalities.shape, equalities),
b=shared_np_array(b.shape, b),
inequalities=shared_np_array(prob.inequalities.shape,
prob.inequalities),
bounds=shared_np_array(bounds.shape, bounds),
variable_fixed=shared_np_array(prob.variable_fixed.shape,
prob.variable_fixed, integer=True),
variable_bounds=shared_np_array(var_bounds.shape, var_bounds),
nullspace=shared_np_array(nulls.shape, nulls),
homogeneous=homogeneous
)
|
Build the matrix representation of the sampling problem.
|
def get_idx(self, node):
group = self.find_node_group_membership(node)
return self.nodes[group].index(node)
|
Finds the index of the node in the sorted list.
|
def read_words(filename="nietzsche.txt", replace=None):
if replace is None:
replace = ['\n', '<eos>']
with tf.gfile.GFile(filename, "r") as f:
try:
context_list = f.read().replace(*replace).split()
except Exception:
f.seek(0)
replace = [x.encode('utf-8') for x in replace]
context_list = f.read().replace(*replace).split()
return context_list
|
Read list format context from a file.
For customized read_words method, see ``tutorial_generate_text.py``.
Parameters
----------
filename : str
a file path.
replace : list of str
replace original string by target string.
Returns
-------
list of str
The context in a list (split using space).
|
def get_compose_dict(assembled_specs, port_specs):
compose_dict = _compose_dict_for_nginx(port_specs)
for app_name in assembled_specs['apps'].keys():
compose_dict[app_name] = _composed_app_dict(app_name, assembled_specs, port_specs)
for service_spec in assembled_specs['services'].values():
compose_dict[service_spec.name] = _composed_service_dict(service_spec)
return compose_dict
|
This function returns a dictionary representation of a docker-compose.yml file, based on assembled_specs from
the spec_assembler, and port_specs from the port_spec compiler
|
def list_servers(self, datacenter_id, depth=1):
response = self._perform_request(
'/datacenters/%s/servers?depth=%s' % (datacenter_id, str(depth)))
return response
|
Retrieves a list of all servers bound to the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
|
def mostLikely(self, pred):
if len(pred) == 1:
return pred.keys()[0]
mostLikelyOutcome = None
maxProbability = 0
for prediction, probability in pred.items():
if probability > maxProbability:
mostLikelyOutcome = prediction
maxProbability = probability
return mostLikelyOutcome
|
Helper function to return a scalar value representing the most
likely outcome given a probability distribution
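Illustrative call, not from the original source (``metric`` stands in for a hypothetical containing instance)::
>>> metric.mostLikely({'a': 0.2, 'b': 0.7, 'c': 0.1})
'b'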
|
def dist_abs(self, src, tar, max_offset=5):
if not src:
return len(tar)
if not tar:
return len(src)
src_len = len(src)
tar_len = len(tar)
src_cur = 0
tar_cur = 0
lcss = 0
local_cs = 0
while (src_cur < src_len) and (tar_cur < tar_len):
if src[src_cur] == tar[tar_cur]:
local_cs += 1
else:
lcss += local_cs
local_cs = 0
if src_cur != tar_cur:
src_cur = tar_cur = max(src_cur, tar_cur)
for i in range(max_offset):
if not (
(src_cur + i < src_len) or (tar_cur + i < tar_len)
):
break
if (src_cur + i < src_len) and (
src[src_cur + i] == tar[tar_cur]
):
src_cur += i
local_cs += 1
break
if (tar_cur + i < tar_len) and (
src[src_cur] == tar[tar_cur + i]
):
tar_cur += i
local_cs += 1
break
src_cur += 1
tar_cur += 1
lcss += local_cs
return round(max(src_len, tar_len) - lcss)
|
Return the "simplest" Sift4 distance between two terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
max_offset : int
The number of characters to search for matching letters
Returns
-------
int
The Sift4 distance according to the simplest formula
Examples
--------
>>> cmp = Sift4Simplest()
>>> cmp.dist_abs('cat', 'hat')
1
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('Colin', 'Cuilen')
3
>>> cmp.dist_abs('ATCG', 'TAGC')
2
|
def disable():
root = platform.config_root()
try:
os.makedirs(root)
except OSError:
pass
filename = os.path.join(root, 'keyringrc.cfg')
if os.path.exists(filename):
msg = "Refusing to overwrite {filename}".format(**locals())
raise RuntimeError(msg)
with open(filename, 'w') as file:
file.write('[backend]\ndefault-keyring=keyring.backends.null.Keyring')
|
Configure the null keyring as the default.
|
def load_gltf(self):
with open(self.path) as fd:
self.meta = GLTFMeta(self.path, json.load(fd))
|
Loads a gltf json file
|
def set_stream(self,stream):
self.jid=stream.me
self.stream=stream
for r in self.rooms.values():
r.set_stream(stream)
|
Change the stream assigned to `self`.
:Parameters:
- `stream`: the new stream to be assigned to `self`.
:Types:
- `stream`: `pyxmpp.stream.Stream`
|
def getModelIDFromParamsHash(self, paramsHash):
entryIdx = self._paramsHashToIndexes.get(paramsHash, None)
if entryIdx is not None:
return self._allResults[entryIdx]['modelID']
else:
return None
|
Return the modelID of the model with the given paramsHash, or
None if not found.
Parameters:
---------------------------------------------------------------------
paramsHash: paramsHash to look for
retval: modelId, or None if not found
|
def handle_reduce(self, reduce_function_names, mapped_docs):
reduce_functions = []
for reduce_function_name in reduce_function_names:
try:
reduce_function = get_function(reduce_function_name)
if getattr(reduce_function, 'view_decorated', None):
reduce_function = reduce_function(self.log)
reduce_functions.append(reduce_function)
except Exception, exc:
self.log(repr(exc))
reduce_functions.append(lambda *args, **kwargs: None)
keys, values = zip(
(key, value) for ((key, doc_id), value) in mapped_docs)
results = []
for reduce_function in reduce_functions:
try:
results.append(reduce_function(keys, values, rereduce=False))
except Exception, exc:
self.log(repr(exc))
results.append(None)
return [True, results]
|
Reduce several mapped documents by several reduction functions.
|
def apply_mask(image, mask_img):
img = check_img(image)
mask = check_img(mask_img)
check_img_compatibility(img, mask)
vol = img.get_data()
mask_data, _ = load_mask_data(mask)
return vol[mask_data], mask_data
|
Read a Nifti file nii_file and a mask Nifti file.
Returns the voxels in nii_file that are within the mask, the mask indices
and the mask shape.
Parameters
----------
image: img-like object or boyle.nifti.NeuroImage or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
mask_img: img-like object or boyle.nifti.NeuroImage or str
3D mask array: True where a voxel should be used.
See img description.
Returns
-------
vol[mask_indices], mask_indices
Note
----
nii_file and mask_file must have the same shape.
Raises
------
NiftiFilesNotCompatible, ValueError
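A hypothetical call (the file names are placeholders only, not from the original source):
vol_masked, mask = apply_mask('subj01_bold.nii.gz', 'brain_mask.nii.gz')
# vol_masked holds only the voxels inside the mask; `mask` is the mask data used for indexing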
|
def login(self, username=None, password=None, android_id=None):
cls_name = type(self).__name__
if username is None:
username = input("Enter your Google username or email address: ")
if password is None:
password = getpass.getpass("Enter your Google Music password: ")
if android_id is None:
android_id = Mobileclient.FROM_MAC_ADDRESS
try:
self.api.login(username, password, android_id)
except OSError:
logger.exception("{} authentication failed.".format(cls_name))
if not self.is_authenticated:
logger.warning("{} authentication failed.".format(cls_name))
return False
logger.info("{} authentication succeeded.\n".format(cls_name))
return True
|
Authenticate the gmusicapi Mobileclient instance.
Parameters:
username (Optional[str]): Your Google Music username. Will be prompted if not given.
password (Optional[str]): Your Google Music password. Will be prompted if not given.
android_id (Optional[str]): The 16 hex digits from an Android device ID.
Default: Use gmusicapi.Mobileclient.FROM_MAC_ADDRESS to create ID from computer's MAC address.
Returns:
``True`` on successful login or ``False`` on unsuccessful login.
|
def get_standardized_timestamp(timestamp, ts_format):
if not timestamp:
return None
if timestamp == 'now':
timestamp = str(datetime.datetime.now())
if not ts_format:
ts_format = detect_timestamp_format(timestamp)
try:
if ts_format == 'unknown':
logger.error('Unable to determine timestamp format for : %s', timestamp)
return -1
elif ts_format == 'epoch':
ts = int(timestamp) * 1000
elif ts_format == 'epoch_ms':
ts = timestamp
elif ts_format == 'epoch_fraction':
ts = int(timestamp[:10]) * 1000 + int(timestamp[11:])
elif ts_format in ('%H:%M:%S', '%H:%M:%S.%f'):
date_today = str(datetime.date.today())
dt_obj = datetime.datetime.strptime(date_today + ' ' + timestamp, '%Y-%m-%d ' + ts_format)
ts = calendar.timegm(dt_obj.utctimetuple()) * 1000 + dt_obj.microsecond / 1000
else:
dt_obj = datetime.datetime.strptime(timestamp, ts_format)
ts = calendar.timegm(dt_obj.utctimetuple()) * 1000 + dt_obj.microsecond / 1000
except ValueError:
return -1
return str(ts)
|
Given a timestamp string, return a time stamp in the epoch ms format. If no date is present in
timestamp then today's date will be added as a prefix before conversion to epoch ms
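Illustrative calls, not from the original source (assumes the helper ``detect_timestamp_format`` recognizes or rejects the inputs as shown):
ts = get_standardized_timestamp('2016-01-02 03:04:05', '%Y-%m-%d %H:%M:%S')
# -> '1451703845000' (epoch milliseconds as a string; may carry a trailing '.0' under Python 3 true division)
bad = get_standardized_timestamp('not-a-date', None)
# -> -1 when the format cannot be determined or parsed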
|
def stepper_config(self, steps_per_revolution, stepper_pins):
data = [self.STEPPER_CONFIGURE, steps_per_revolution & 0x7f, (steps_per_revolution >> 7) & 0x7f]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin])
self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)
|
Configure stepper motor prior to operation.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
|
def construct_inlines(self):
inline_formsets = []
for inline_class in self.get_inlines():
inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)
inline_formset = inline_instance.construct_formset()
inline_formsets.append(inline_formset)
return inline_formsets
|
Returns the inline formset instances
|
def create(self, validated_data):
ret = []
for attrs in validated_data:
if 'non_field_errors' not in attrs and not any(isinstance(attrs[field], list) for field in attrs):
ret.append(self.child.create(attrs))
else:
ret.append(attrs)
return ret
|
This selectively calls the child create method based on whether or not validation failed for each payload.
|
def sim(self, src, tar, qval=2):
    return super(self.__class__, self).sim(src, tar, qval, 1, 1)
|
r"""Return the Jaccard similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
Returns
-------
float
Jaccard similarity
Examples
--------
>>> cmp = Jaccard()
>>> cmp.sim('cat', 'hat')
0.3333333333333333
>>> cmp.sim('Niall', 'Neil')
0.2222222222222222
>>> cmp.sim('aluminum', 'Catalan')
0.0625
>>> cmp.sim('ATCG', 'TAGC')
0.0
|
def _ensure_started(self):
if self._process and self._process.poll() is None:
return
if not getattr(self, "_cmd"):
raise RuntimeError("Player command is not configured")
log.debug("Starting playback command: %r", self._cmd)
self._process = SilentPopen(self._cmd)
self._post_start()
|
Ensure player backing process is started
|
def filter_issues_for_tags(self, newer_tag, older_tag):
filtered_pull_requests = self.delete_by_time(self.pull_requests,
older_tag, newer_tag)
filtered_issues = self.delete_by_time(self.issues, older_tag,
newer_tag)
newer_tag_name = newer_tag["name"] if newer_tag else None
if self.options.filter_issues_by_milestone:
filtered_issues = self.filter_by_milestone(
filtered_issues, newer_tag_name, self.issues
)
filtered_pull_requests = self.filter_by_milestone(
filtered_pull_requests, newer_tag_name, self.pull_requests
)
return filtered_issues, filtered_pull_requests
|
Apply all filters to issues and pull requests.
:param dict older_tag: All issues before this tag's date will be
excluded. May be special value, if new tag is
the first tag. (Means **older_tag** is when
the repo was created.)
:param dict newer_tag: All issues after this tag's date will be
excluded. May be title of unreleased section.
:rtype: list(dict), list(dict)
:return: Filtered issues and pull requests.
|
def elements(self, using, value):
return self._execute(Command.FIND_CHILD_ELEMENTS, {
'using': using,
'value': value
})
|
Find elements within the current element.
Support:
Android iOS Web(WebView)
Args:
using(str): The element location strategy.
value(str): The value of the location strategy.
Returns:
Return a List<Element | None>, if no element matched, the list is empty.
Raises:
WebDriverException.
|
def new_noncomment(self, start_lineno, end_lineno):
block = NonComment(start_lineno, end_lineno)
self.blocks.append(block)
self.current_block = block
|
We are transitioning from a noncomment to a comment.
|
def create_patch(self, from_tag, to_tag):
return str(self._git.diff('{}..{}'.format(from_tag, to_tag), _tty_out=False))
|
Create a patch between tags
|
def unlock(self, password):
self.password = password
if self.config_key in self.config and self.config[self.config_key]:
self._decrypt_masterpassword()
else:
self._new_masterpassword(password)
self._save_encrypted_masterpassword()
|
The password is used to encrypt this masterpassword. To
decrypt the keys stored in the keys database, one must use
BIP38, decrypt the masterpassword from the configuration
store with the user password, and use the decrypted
masterpassword to decrypt the BIP38 encrypted private keys
from the keys storage!
:param str password: Password to use for en-/de-cryption
|
def _generateFindR(self, **kwargs):
for needle in self._generateChildrenR():
if needle._match(**kwargs):
yield needle
|
Generator which yields matches on AXChildren and their children.
|
def parse_conll(self, texts: List[str], retry_count: int = 0) -> List[str]:
post_data = {'texts': texts, 'output_type': 'conll'}
try:
response = requests.post(f'http://{self.hostname}:{self.port}',
json=post_data,
headers={'Connection': 'close'})
response.raise_for_status()
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout) as server_error:
raise ServerError(server_error, self.hostname, self.port)
except requests.exceptions.HTTPError as http_error:
raise http_error
else:
try:
return response.json()
except json.JSONDecodeError as json_exception:
if retry_count == self.retries:
self.log_error(response.text)
raise Exception('Json Decoding error cannot parse this '
f':\n{response.text}')
return self.parse_conll(texts, retry_count + 1)
|
Processes the texts using TweeboParse and returns them in CoNLL format.
:param texts: The List of Strings to be processed by TweeboParse.
:param retry_count: The number of times it has retried for. Default
0 does not require setting, main purpose is for
recursion.
:return: A list of CoNLL formatted strings.
:raises ServerError: Caused when the server is not running.
:raises :py:class:`requests.exceptions.HTTPError`: Caused when the
input texts is not formated correctly e.g. When you give it a
String not a list of Strings.
:raises :py:class:`json.JSONDecodeError`: Caused if after self.retries
attempts to parse the data it cannot decode the data.
:Example:
|
def calibrateEB(variances, sigma2):
if (sigma2 <= 0 or min(variances) == max(variances)):
return(np.maximum(variances, 0))
sigma = np.sqrt(sigma2)
eb_prior = gfit(variances, sigma)
part = functools.partial(gbayes, g_est=eb_prior,
sigma=sigma)
if len(variances) >= 200:
calib_x = np.percentile(variances,
np.arange(0, 102, 2))
calib_y = list(map(part, calib_x))
calib_all = np.interp(variances, calib_x, calib_y)
else:
calib_all = list(map(part, variances))
return np.asarray(calib_all)
|
Calibrate noisy variance estimates with empirical Bayes.
Parameters
----------
variances: ndarray
List of variance estimates.
sigma2: float
Estimate of the Monte Carlo noise in `variances`.
Returns
-------
An array of the calibrated variance estimates
|
def filesByCell(fnames,cells):
byCell={}
fnames=smartSort(fnames)
days = list(set([elem[:5] for elem in fnames if elem.endswith(".abf")]))
for day in smartSort(days):
parent=None
for i,fname in enumerate([elem for elem in fnames if elem.startswith(day) and elem.endswith(".abf")]):
ID=os.path.splitext(fname)[0]
if len([x for x in fnames if x.startswith(ID)])-1:
parent=ID
if not parent in byCell:
byCell[parent]=[]
byCell[parent]=byCell[parent]+[fname]
return byCell
|
given files and cells, return a dict of files grouped by cell.
|
def _word_ngrams(self, tokens):
if self.stop_words is not None:
tokens = [w for w in tokens if w not in self.stop_words]
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i: i + n]))
return tokens
|
Turn tokens into a sequence of n-grams
ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L124-L153
|
def _download_item(item_id, path='.', item=None):
session.token = verify_credentials()
filename, content_iter = session.communicator.download_item(
item_id, session.token)
item_path = os.path.join(path, filename)
print('Creating file at {0}'.format(item_path))
out_file = open(item_path, 'wb')
for block in content_iter:
out_file.write(block)
out_file.close()
for callback in session.item_download_callbacks:
if not item:
item = session.communicator.item_get(session.token, item_id)
callback(session.communicator, session.token, item, item_path)
|
Download the requested item to the specified path.
:param item_id: The id of the item to be downloaded
:type item_id: int | long
:param path: (optional) the location to download the item
:type path: string
:param item: The dict of item info
:type item: dict | None
|
def convertImages(self):
exts=['.jpg','.png']
for fname in [x for x in self.files1 if cm.ext(x) in exts]:
ID="UNKNOWN"
if len(fname)>8 and fname[:8] in self.IDs:
ID=fname[:8]
fname2=ID+"_jpg_"+fname
if not fname2 in self.files2:
self.log.info("copying over [%s]"%fname2)
shutil.copy(os.path.join(self.folder1,fname),os.path.join(self.folder2,fname2))
if not fname[:8]+".abf" in self.files1:
self.log.error("orphan image: %s",fname)
exts=['.tif','.tiff']
for fname in [x for x in self.files1 if cm.ext(x) in exts]:
ID="UNKNOWN"
if len(fname)>8 and fname[:8] in self.IDs:
ID=fname[:8]
fname2=ID+"_tif_"+fname+".jpg"
if not fname2 in self.files2:
self.log.info("converting micrograph [%s]"%fname2)
imaging.TIF_to_jpg(os.path.join(self.folder1,fname),saveAs=os.path.join(self.folder2,fname2))
if not fname[:8]+".abf" in self.files1:
self.log.error("orphan image: %s",fname)
|
run this to turn all folder1 TIFs and JPGs into folder2 data.
TIFs will be treated as micrographs and converted to JPG with enhanced
contrast. JPGs will simply be copied over.
|
def execute(api):
try:
return api.execute()
except Exception as exception:
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
_print_error('%s: Exception %s: %s' % (now, type(exception).__name__,
str(exception)))
raise exception
|
Executes operation.
Args:
api: The base API object
Returns:
A response body object
|
def admin_link_move_down(obj, link_text='down'):
if obj.rank == obj.grouped_filter().count():
return ''
content_type = ContentType.objects.get_for_model(obj)
link = reverse('awl-rankedmodel-move', args=(content_type.id, obj.id,
obj.rank + 1))
return '<a href="%s">%s</a>' % (link, link_text)
|
Returns a link to a view that moves the passed in object down in rank.
:param obj:
Object to move
:param link_text:
Text to display in the link. Defaults to "down"
:returns:
HTML link code to view for moving the object
|
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
cli = CommandLineTool()
return cli.run(argv)
|
Main command line interface.
|
def _parse_info(self, info_field):
info = dict()
for item in info_field.split(';'):
info_item_data = item.split('=')
if len(info_item_data) == 1:
info[info_item_data[0]] = True
elif len(info_item_data) == 2:
info[info_item_data[0]] = info_item_data[1]
return info
|
Parse the VCF info field
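For illustration (not from the original source; ``parser`` stands in for a hypothetical instance). Flag-style entries with no ``=`` map to ``True``::
>>> parser._parse_info('DP=100;AF=0.5;DB')
{'DP': '100', 'AF': '0.5', 'DB': True}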
|
def _uptime_syllable():
global __boottime
try:
__boottime = os.stat('/dev/pty/mst/pty0').st_mtime
return time.time() - __boottime
except (NameError, OSError):
return None
|
Returns uptime in seconds or None, on Syllable.
|
def _parse_data_fields(self, fields, tag_id="tag", sub_id="code"):
for field in fields:
params = field.params
if tag_id not in params:
continue
field_repr = OrderedDict([
[self.i1_name, params.get(self.i1_name, " ")],
[self.i2_name, params.get(self.i2_name, " ")],
])
for subfield in field.find("subfield"):
if sub_id not in subfield.params:
continue
content = MARCSubrecord(
val=subfield.getContent().strip(),
i1=field_repr[self.i1_name],
i2=field_repr[self.i2_name],
other_subfields=field_repr
)
code = subfield.params[sub_id]
if code in field_repr:
field_repr[code].append(content)
else:
field_repr[code] = [content]
tag = params[tag_id]
if tag in self.datafields:
self.datafields[tag].append(field_repr)
else:
self.datafields[tag] = [field_repr]
|
Parse data fields.
Args:
fields (list): of HTMLElements
tag_id (str): parameter name, which holds the information, about
field name this is normally "tag", but in case of
oai_marc "id"
sub_id (str): id of parameter, which holds information about
subfield name this is normally "code" but in case of
oai_marc "label"
|
def construct_datetime(cls, *args, **kwargs):
if len(args) == 1:
arg = args[0]
method = cls.__get_dt_constructor(
type(arg).__module__,
type(arg).__name__,
)
result = method(arg)
try:
result = result.replace(tzinfo=kwargs.pop('tzinfo'))
except KeyError:
pass
if kwargs:
first_key = kwargs.keys()[0]
tmpl = (
"{first_key} is an invalid keyword "
"argument for this function."
)
raise TypeError(tmpl.format(**locals()))
else:
result = datetime.datetime(*args, **kwargs)
return result
|
Construct a datetime.datetime from a number of different time
types found in python and pythonwin
|
def heappush_max(heap, item):
heap.append(item)
_siftdown_max(heap, 0, len(heap) - 1)
|
Push item onto heap, maintaining the heap invariant.
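The ``_siftdown_max`` helper is not included in the snippet above. A minimal sketch of what such a helper typically looks like (an assumption, mirroring ``heapq._siftdown`` with the comparison reversed for a max-heap):
def _siftdown_max(heap, startpos, pos):
    newitem = heap[pos]
    # Bubble the new item toward the root while it is larger than its parent.
    while pos > startpos:
        parentpos = (pos - 1) >> 1
        parent = heap[parentpos]
        if parent < newitem:
            heap[pos] = parent
            pos = parentpos
            continue
        break
    heap[pos] = newitem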
|
def _get_stream_schema(fields):
stream_schema = topology_pb2.StreamSchema()
for field in fields:
key = stream_schema.keys.add()
key.key = field
key.type = topology_pb2.Type.Value("OBJECT")
return stream_schema
|
Returns a StreamSchema protobuf message
|
def write(self, data):
args = parse_qs(self.handler.environ.get("QUERY_STRING"))
if "i" in args:
i = args["i"]
else:
i = "0"
super(JSONPolling, self).write("io.j[%s]('%s');" % (i, data))
|
Just quote out stuff before sending it out
|
def create_marathon_acme(
client_creator, cert_store, acme_email, allow_multiple_certs,
marathon_addrs, marathon_timeout, sse_timeout, mlb_addrs, group,
reactor):
marathon_client = MarathonClient(marathon_addrs, timeout=marathon_timeout,
sse_kwargs={'timeout': sse_timeout},
reactor=reactor)
marathon_lb_client = MarathonLbClient(mlb_addrs, reactor=reactor)
return MarathonAcme(
marathon_client,
group,
cert_store,
marathon_lb_client,
client_creator,
reactor,
acme_email,
allow_multiple_certs
)
|
Create a marathon-acme instance.
:param client_creator:
The txacme client creator function.
:param cert_store:
The txacme certificate store instance.
:param acme_email:
Email address to use when registering with the ACME service.
:param allow_multiple_certs:
Whether to allow multiple certificates per app port.
:param marathon_addrs:
List of addresses for the Marathon instances used to find app domains
that require certificates.
:param marathon_timeout:
Amount of time in seconds to wait for response headers to be received
from Marathon.
:param sse_timeout:
Amount of time in seconds to wait for some event data to be received
from Marathon.
:param mlb_addrs:
List of addresses for marathon-lb instances to reload when a new
certificate is issued.
:param group:
The marathon-lb group (``HAPROXY_GROUP``) to consider when finding
app domains.
:param reactor: The reactor to use.
|
def write_creation_info(creation_info, out):
    out.write('# Creation Info\n\n')
    for creator in sorted(creation_info.creators):
        write_value('Creator', creator, out)
    write_value('Created', creation_info.created_iso_format, out)
    if creation_info.has_comment:
        write_text_value('CreatorComment', creation_info.comment, out)
|
Write the creation info to out.
|
def popen_streaming_output(cmd, callback, timeout=None):
if os.name == 'nt':
process = subprocess.Popen(
shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout = process.stdout
else:
master, slave = os.openpty()
process = subprocess.Popen(
shlex.split(cmd, posix=True),
stdout=slave,
stderr=slave
)
stdout = os.fdopen(master)
os.close(slave)
def kill(process_):
try:
process_.kill()
except OSError:
pass
timer = Timer(timeout, kill, [process])
timer.setDaemon(True)
timer.start()
while process.returncode is None:
try:
if os.name == 'nt':
line = stdout.readline()
line = line.decode("utf-8")
if line:
callback(line.rstrip())
else:
while True:
line = stdout.readline()
if not line:
break
callback(line.rstrip())
except (IOError, OSError):
pass
if not timer.is_alive():
raise TimeoutError("subprocess running command '{}' timed out after {} seconds".format(cmd, timeout))
process.poll()
timer.cancel()
return process.returncode
|
Open a subprocess and stream its output without hard-blocking.
:param cmd: the command to execute within the subprocess
:type cmd: str
:param callback: function that intakes the subprocess' stdout line by line.
It is called for each line received from the subprocess' stdout stream.
:type callback: Callable[[Context], bool]
:param timeout: the timeout time of the subprocess
:type timeout: float
:raises TimeoutError: if the subprocess' execution time exceeds
the timeout time
:return: the return code of the executed subprocess
:rtype: int
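A hypothetical invocation on a POSIX system (command and callback are illustrative only):
collected = []
rc = popen_streaming_output('echo hello', collected.append, timeout=30)
# Each stdout line is handed to the callback as it is read; `rc` is the subprocess exit code.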
|
def run(self):
if not self.state.document.settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = os.path.dirname(os.path.abspath(source))
path = rst.directives.path(self.arguments[0])
path = os.path.normpath(os.path.join(source_dir, path))
path = utils.relative_path(None, path)
path = nodes.reprunicode(path)
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler = self.state.document.settings.input_encoding_error_handler
tab_width = self.options.get(
'tab-width', self.state.document.settings.tab_width)
try:
self.state.document.settings.record_dependencies.add(path)
include_file = io.FileInput(source_path=path,
encoding=encoding,
error_handler=e_handler)
except UnicodeEncodeError as error:
raise self.severe('Problems with "%s" directive path:\n'
'Cannot encode input file path "%s" '
'(wrong locale?).' %
(self.name, SafeString(path)))
except IOError as error:
raise self.severe('Problems with "%s" directive path:\n%s.' %
(self.name, ErrorString(error)))
try:
rawtext = include_file.read()
except UnicodeError as error:
raise self.severe('Problem with "%s" directive:\n%s' %
(self.name, ErrorString(error)))
config = self.state.document.settings.env.config
converter = M2R(no_underscore_emphasis=config.no_underscore_emphasis)
include_lines = statemachine.string2lines(converter(rawtext),
tab_width,
convert_whitespace=True)
self.state_machine.insert_input(include_lines, path)
return []
|
Most of this method is from ``docutils.parser.rst.Directive``.
docutils version: 0.12
|
def getParticleInfo(self, modelId):
entry = self._allResults[self._modelIDToIdx[modelId]]
return (entry['modelParams']['particleState'], modelId, entry['errScore'],
entry['completed'], entry['matured'])
|
Return particle info for a specific modelId.
Parameters:
---------------------------------------------------------------------
modelId: which model Id
retval: (particleState, modelId, errScore, completed, matured)
|
def jhk_to_sdssu(jmag,hmag,kmag):
return convert_constants(jmag,hmag,kmag,
SDSSU_JHK,
SDSSU_JH, SDSSU_JK, SDSSU_HK,
SDSSU_J, SDSSU_H, SDSSU_K)
|
Converts given J, H, Ks mags to an SDSS u magnitude value.
Parameters
----------
jmag,hmag,kmag : float
2MASS J, H, Ks mags of the object.
Returns
-------
float
The converted SDSS u band magnitude.
|
def getapplist(self):
app_list = []
self._update_apps()
for gui in self._running_apps:
name = gui.localizedName()
try:
name = unicode(name)
except NameError:
name = str(name)
except UnicodeEncodeError:
pass
app_list.append(name)
return list(set(app_list))
|
Get the names of all accessibility applications that are currently running.
@return: list of application names (strings) on success.
@rtype: list
|
def _trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,
minNumSyns):
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
nSegsRemoved, nSynsRemoved = 0, 0
segsToDel = []
for segment in segList:
synsToDel = [syn for syn in segment.syns if syn[2] < minPermanence]
if len(synsToDel) == len(segment.syns):
segsToDel.append(segment)
else:
if len(synsToDel) > 0:
for syn in synsToDel:
segment.syns.remove(syn)
nSynsRemoved += 1
if len(segment.syns) < minNumSyns:
segsToDel.append(segment)
nSegsRemoved += len(segsToDel)
for seg in segsToDel:
self._cleanUpdatesList(colIdx, cellIdx, seg)
self.cells[colIdx][cellIdx].remove(seg)
nSynsRemoved += len(seg.syns)
return nSegsRemoved, nSynsRemoved
|
This method goes through a list of segments for a given cell and
deletes all synapses whose permanence is less than minPermanence and deletes
any segments that have less than minNumSyns synapses remaining.
:param colIdx Column index
:param cellIdx Cell index within the column
:param segList List of segment references
:param minPermanence Any syn whose permanence is 0 or < minPermanence will
be deleted.
:param minNumSyns Any segment with less than minNumSyns synapses remaining
in it will be deleted.
:returns: tuple (numSegsRemoved, numSynsRemoved)
|
def luminosities_of_galaxies_within_circles_in_units(self, radius : dim.Length, unit_luminosity='eps', exposure_time=None):
return list(map(lambda galaxy: galaxy.luminosity_within_circle_in_units(
radius=radius, unit_luminosity=unit_luminosity, kpc_per_arcsec=self.kpc_per_arcsec,
exposure_time=exposure_time),
self.galaxies))
|
Compute the total luminosity of all galaxies in this plane within a circle of specified radius.
See *galaxy.light_within_circle* and *light_profiles.light_within_circle* for details \
of how this is performed.
Parameters
----------
radius : float
The radius of the circle to compute the luminosity within.
unit_luminosity : str
The units the luminosity is returned in (eps | counts).
exposure_time : float
The exposure time of the observation, which converts luminosity from electrons per second units to counts.
|
def Cp(self, T):
result = 0.0
for c, e in zip(self._coefficients, self._exponents):
result += c*T**e
return result
|
Calculate the heat capacity of the compound phase.
:param T: [K] temperature
:returns: [J/mol/K] Heat capacity.
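A worked example with illustrative coefficients (not real thermochemical data), evaluating Cp(T) = a + b*T + c*T**-2 at 1000 K the same way the method does:
coefficients = [100.0, 0.01, -1.0e6]   # illustrative values only
exponents = [0.0, 1.0, -2.0]
T = 1000.0
Cp = sum(c * T**e for c, e in zip(coefficients, exponents))
# Cp == 100.0 + 0.01*1000 - 1.0e6/1000**2 == 109.0  [J/mol/K]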
|
def delete(self):
with db.session.begin_nested():
Membership.query_by_group(self).delete()
GroupAdmin.query_by_group(self).delete()
GroupAdmin.query_by_admin(self).delete()
db.session.delete(self)
|
Delete a group and all associated memberships.
|
def update_unique(self, table_name, fields, data, cond=None, unique_fields=None,
*, raise_if_not_found=False):
eid = find_unique(self.table(table_name), data, unique_fields)
if eid is None:
if raise_if_not_found:
msg = 'Could not find {} with {}'.format(table_name, data)
if cond is not None:
msg += ' where {}.'.format(cond)
raise IndexError(msg)
else:
self.table(table_name).update(_to_string(fields), cond=cond, eids=[eid])
return eid
|
Update the unique matching element to have a given set of fields.
Parameters
----------
table_name: str
fields: dict or function[dict -> None]
new data/values to insert into the unique element
or a method that will update the elements.
data: dict
Sample data for query
cond: tinydb.Query
which elements to update
unique_fields: list of str
raise_if_not_found: bool
Will raise an exception if the element is not found for update.
Returns
-------
eid: int
The eid of the updated element if found, None otherwise.
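A rough standalone illustration of the find-unique-then-update idiom over a plain list of dicts; the names below are illustrative and not the tinydb-backed API used above:
def find_unique(records, data, unique_fields):
    # index of the single record matching `data` on all `unique_fields`, else None
    hits = [i for i, rec in enumerate(records)
            if all(rec.get(f) == data.get(f) for f in unique_fields)]
    return hits[0] if len(hits) == 1 else None

def update_unique(records, fields, data, unique_fields, raise_if_not_found=False):
    idx = find_unique(records, data, unique_fields)
    if idx is None:
        if raise_if_not_found:
            raise IndexError('no unique match for {}'.format(data))
    else:
        records[idx].update(fields)
    return idx

people = [{'name': 'ada', 'age': 36}, {'name': 'bob', 'age': 41}]
update_unique(people, {'age': 37}, {'name': 'ada'}, ['name'])
print(people[0])   # {'name': 'ada', 'age': 37}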
|
def get_wrapper_by_name(env, classname):
currentenv = env
while True:
if classname == currentenv.class_name():
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named %s" % classname)
|
Given a gym environment possibly wrapped multiple times, returns a wrapper
of class named classname or raises ValueError if no such wrapper was applied.
Parameters
----------
env: gym.Env or gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname
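A minimal sketch of the unwrapping walk using stand-in classes (gym is not imported here; real gym wrappers expose class_name() and an .env attribute in much the same way):
class FakeEnv:
    @classmethod
    def class_name(cls):
        return cls.__name__

class FakeWrapper(FakeEnv):
    def __init__(self, env):
        self.env = env

class ClipReward(FakeWrapper): pass
class FrameStack(FakeWrapper): pass

env = ClipReward(FrameStack(FakeEnv()))
current = env
while current.class_name() != 'FrameStack':
    current = current.env          # peel one wrapper layer
print(current.class_name())        # FrameStack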
|
def modelAdoptNextOrphan(self, jobId, maxUpdateInterval):
@g_retrySQL
def findCandidateModelWithRetries():
modelID = None
with ConnectionFactory.get() as conn:
query = 'SELECT model_id FROM %s ' \
' WHERE status=%%s ' \
' AND job_id=%%s ' \
' AND TIMESTAMPDIFF(SECOND, ' \
' _eng_last_update_time, ' \
' UTC_TIMESTAMP()) > %%s ' \
' LIMIT 1 ' \
% (self.modelsTableName,)
sqlParams = [self.STATUS_RUNNING, jobId, maxUpdateInterval]
numRows = conn.cursor.execute(query, sqlParams)
rows = conn.cursor.fetchall()
assert numRows <= 1, "Unexpected numRows: %r" % numRows
if numRows == 1:
(modelID,) = rows[0]
return modelID
@g_retrySQL
def adoptModelWithRetries(modelID):
adopted = False
with ConnectionFactory.get() as conn:
query = 'UPDATE %s SET _eng_worker_conn_id=%%s, ' \
' _eng_last_update_time=UTC_TIMESTAMP() ' \
' WHERE model_id=%%s ' \
' AND status=%%s' \
' AND TIMESTAMPDIFF(SECOND, ' \
' _eng_last_update_time, ' \
' UTC_TIMESTAMP()) > %%s ' \
' LIMIT 1 ' \
% (self.modelsTableName,)
sqlParams = [self._connectionID, modelID, self.STATUS_RUNNING,
maxUpdateInterval]
numRowsAffected = conn.cursor.execute(query, sqlParams)
assert numRowsAffected <= 1, 'Unexpected numRowsAffected=%r' % (
numRowsAffected,)
if numRowsAffected == 1:
adopted = True
else:
(status, connectionID) = self._getOneMatchingRowNoRetries(
self._models, conn, {'model_id':modelID},
['status', '_eng_worker_conn_id'])
adopted = (status == self.STATUS_RUNNING and
connectionID == self._connectionID)
return adopted
adoptedModelID = None
while True:
modelID = findCandidateModelWithRetries()
if modelID is None:
break
if adoptModelWithRetries(modelID):
adoptedModelID = modelID
break
return adoptedModelID
|
Look through the models table for an orphaned model, which is a model
that is not completed yet and whose _eng_last_update_time is more than
maxUpdateInterval seconds in the past.
If one is found, change its _eng_worker_conn_id to the current worker's
and return the model id.
Parameters:
----------------------------------------------------------------
jobId:             jobId of the job whose models to consider
maxUpdateInterval: a running model whose _eng_last_update_time is older than
                   this many seconds is considered orphaned
retval:            modelId of the model we adopted, or None if none found
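The adoption itself is a two-step optimistic claim: find a stale candidate, then issue an UPDATE whose WHERE clause repeats the staleness and status checks so that only one worker can win the row. A toy sqlite3 sketch of that idiom (schema and names are simplified and hypothetical, unlike the MySQL models table used above):
import sqlite3, time

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE models (model_id INTEGER, status TEXT, '
             'worker TEXT, last_update REAL)')
conn.execute("INSERT INTO models VALUES (7, 'running', 'worker-a', ?)",
             (time.time() - 600,))

max_update_interval = 300
now = time.time()

row = conn.execute(
    "SELECT model_id FROM models WHERE status='running' "
    "AND ? - last_update > ?", (now, max_update_interval)).fetchone()

if row is not None:
    # guarded claim: the UPDATE matches only if the row is still stale
    claimed = conn.execute(
        "UPDATE models SET worker='worker-b', last_update=? "
        "WHERE model_id=? AND status='running' AND ? - last_update > ?",
        (now, row[0], now, max_update_interval)).rowcount == 1
    print('adopted model', row[0] if claimed else None)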
|
def encode_payload(self, messages):
if not messages or messages[0] is None:
return ''
if len(messages) == 1:
return messages[0].encode('utf-8')
payload = u''.join([(u'\ufffd%d\ufffd%s' % (len(p), p))
for p in messages if p is not None])
return payload.encode('utf-8')
|
Encode list of messages. Expects messages to be unicode.
``messages`` - List of raw messages to encode, if necessary
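The multi-message framing delimits each message with U+FFFD and its character length; a single message is sent bare. A quick check with throwaway message strings:
msgs = [u'3:::hello', u'3:::world!']
framed = u''.join(u'\ufffd%d\ufffd%s' % (len(p), p) for p in msgs)
print(framed)                   # \ufffd9\ufffd3:::hello\ufffd10\ufffd3:::world!
print(framed.encode('utf-8'))   # bytes actually sent on the wire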
|
def _addLoggingLevel(levelName, levelNum, methodName=None):
if not methodName:
methodName = levelName.lower()
if hasattr(logging, levelName):
raise AttributeError(
'{} already defined in logging module'.format(levelName))
if hasattr(logging, methodName):
raise AttributeError(
'{} already defined in logging module'.format(methodName))
if hasattr(logging.getLoggerClass(), methodName):
raise AttributeError(
'{} already defined in logger class'.format(methodName))
def logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(levelNum):
self._log(levelNum, message, args, **kwargs)
def logToRoot(message, *args, **kwargs):
logging.log(levelNum, message, *args, **kwargs)
logging.addLevelName(levelNum, levelName)
setattr(logging, levelName, levelNum)
setattr(logging.getLoggerClass(), methodName, logForLevel)
setattr(logging, methodName, logToRoot)
|
Comprehensively adds a new logging level to the `logging` module and the
currently configured logging class.
`levelName` becomes an attribute of the `logging` module with the value
`levelNum`. `methodName` becomes a convenience method for both `logging`
itself and the class returned by `logging.getLoggerClass()` (usually just
`logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
used.
To avoid accidentally clobbering existing attributes, this method will
raise an `AttributeError` if the level name is already an attribute of the
`logging` module or if the method name is already present.
Example
-------
>>> addLoggingLevel('TRACE', logging.DEBUG - 5)
>>> logging.getLogger(__name__).setLevel("TRACE")
>>> logging.getLogger(__name__).trace('that worked')
>>> logging.trace('so did this')
>>> logging.TRACE
5
|
def set_login(self, callsign, passwd="-1", skip_login=False):
self.__dict__.update(locals())
|
Set callsign and password
|
def get_command_responses(self):
if not self.response_queue.empty():
yield None
while not self.response_queue.empty():
line = self.response_queue.get()
if line is not None:
yield line
|
Get responses to commands sent
|
def upload_json_results(self, token, filepath, community_id,
producer_display_name, metric_name,
producer_revision, submit_time, **kwargs):
parameters = dict()
parameters['token'] = token
parameters['communityId'] = community_id
parameters['producerDisplayName'] = producer_display_name
parameters['metricName'] = metric_name
parameters['producerRevision'] = producer_revision
parameters['submitTime'] = submit_time
optional_keys = [
'config_item_id', 'test_dataset_id', 'truth_dataset_id', 'silent',
'unofficial', 'build_results_url', 'branch', 'extra_urls',
'parent_keys', 'params']
for key in optional_keys:
if key in kwargs:
if key == 'config_item_id':
parameters['configItemId'] = kwargs[key]
elif key == 'test_dataset_id':
parameters['testDatasetId'] = kwargs[key]
elif key == 'truth_dataset_id':
parameters['truthDatasetId'] = kwargs[key]
elif key == 'parent_keys':
parameters['parentKeys'] = kwargs[key]
elif key == 'build_results_url':
parameters['buildResultsUrl'] = kwargs[key]
elif key == 'extra_urls':
parameters['extraUrls'] = json.dumps(kwargs[key])
elif key == 'params':
parameters[key] = json.dumps(kwargs[key])
elif key == 'silent':
if kwargs[key]:
parameters[key] = kwargs[key]
elif key == 'unofficial':
if kwargs[key]:
parameters[key] = kwargs[key]
else:
parameters[key] = kwargs[key]
file_payload = open(filepath, 'rb')
response = self.request('midas.tracker.results.upload.json',
parameters, file_payload)
return response
|
Upload a JSON file containing numeric scoring results to be added as
scalars. File is parsed and then deleted from the server.
:param token: A valid token for the user in question.
:param filepath: The path to the JSON file.
:param community_id: The id of the community that owns the producer.
:param producer_display_name: The display name of the producer.
:param producer_revision: The repository revision of the producer
that produced this value.
:param submit_time: The submit timestamp. Must be parsable with PHP
strtotime().
:param config_item_id: (optional) If this value pertains to a specific
configuration item, pass its id here.
:param test_dataset_id: (optional) If this value pertains to a
specific test dataset, pass its id here.
:param truth_dataset_id: (optional) If this value pertains to a
specific ground truth dataset, pass its id here.
:param parent_keys: (optional) Semicolon-separated list of parent keys
to look for numeric results under. Use '.' to denote nesting, like
in normal javascript syntax.
:param silent: (optional) If true, do not perform threshold-based email
notifications for this scalar.
:param unofficial: (optional) If true, creates an unofficial scalar
visible only to the user performing the submission.
:param build_results_url: (optional) A URL for linking to build results
for this submission.
:param branch: (optional) The branch name in the source repository for
this submission.
:param params: (optional) Any key/value pairs that should be displayed
with this scalar result.
:type params: dict
:param extra_urls: (optional) Other URLs that should be displayed
with this scalar result. Each element of the list should be a dict
with the following keys: label, text, href
:type extra_urls: list of dicts
:returns: The list of scalars that were created.
|
def emulate_until(self, target: int):
self._concrete = True
self._break_unicorn_at = target
if self.emu:
self.emu._stop_at = target
|
Tells the CPU to set up a concrete unicorn emulator and use it to execute instructions
until target is reached.
:param target: Where Unicorn should hand control back to Manticore. Set to 0 for all instructions.
|
def available_sources(sources):
for dirs, name in sources:
for directory in dirs:
fn = os.path.join(directory, name) + '.py'
if os.path.isfile(fn):
yield fn
|
Yield the sources that are present.
|
def encode(self) -> str:
payload = {}
payload.update(self.registered_claims)
payload.update(self.payload)
return encode(self.secret, payload, self.alg, self.header)
|
Create a token based on the data held in the class.
:return: A new token
:rtype: str
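The module-level encode() it delegates to is not shown here. As a rough stand-in only (an assumption, not necessarily the library this class uses), the same merge-then-sign step with PyJWT would look like:
import jwt   # PyJWT, used purely as an illustrative stand-in

secret = 'not-a-real-secret'
registered_claims = {'iss': 'example-issuer', 'exp': 2000000000}
payload = {'user_id': 42}

claims = {}
claims.update(registered_claims)
claims.update(payload)          # payload values override registered claims
token = jwt.encode(claims, secret, algorithm='HS256')
print(token)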
|
def make_shell_logfile_data_url(host, shell_port, instance_id, offset, length):
return "http://%s:%d/filedata/log-files/%s.log.0?offset=%s&length=%s" % \
(host, shell_port, instance_id, offset, length)
|
Make the url for log-file data in heron-shell
from the info stored in stmgr.
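For example, with hypothetical stmgr values:
url = make_shell_logfile_data_url('10.1.2.3', 8080, 'container_1_word_3', 0, 65536)
# http://10.1.2.3:8080/filedata/log-files/container_1_word_3.log.0?offset=0&length=65536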
|
def add_package(self, name):
name, cls_name = parse_package_string(name)
if name in self.package_map:
return
package = EffectPackage(name)
package.load()
self.packages.append(package)
self.package_map[package.name] = package
self.polulate(package.effect_packages)
|
Registers a single package
:param name: (str) The effect package to add
|
def apply_exclude_tags_regex(self, all_tags):
filtered = []
for tag in all_tags:
if not re.match(self.options.exclude_tags_regex, tag["name"]):
filtered.append(tag)
if len(all_tags) == len(filtered):
self.warn_if_nonmatching_regex()
return filtered
|
Filter tags according to the exclude_tags_regex option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
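A standalone sketch of the same filter; the tag dicts and pattern are illustrative (the real method takes the pattern from self.options.exclude_tags_regex):
import re

all_tags = [{'name': 'v1.0.0'}, {'name': 'v1.1.0-rc1'}, {'name': 'v1.1.0'}]
exclude_tags_regex = r'.*-rc\d+'
filtered = [t for t in all_tags if not re.match(exclude_tags_regex, t['name'])]
print([t['name'] for t in filtered])   # ['v1.0.0', 'v1.1.0']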
|
def remove_metabolites(self, metabolite_list, destructive=False):
if not hasattr(metabolite_list, '__iter__'):
metabolite_list = [metabolite_list]
metabolite_list = [x for x in metabolite_list
if x.id in self.metabolites]
for x in metabolite_list:
x._model = None
associated_groups = self.get_associated_groups(x)
for group in associated_groups:
group.remove_members(x)
if not destructive:
for the_reaction in list(x._reaction):
the_coefficient = the_reaction._metabolites[x]
the_reaction.subtract_metabolites({x: the_coefficient})
else:
for x in list(x._reaction):
x.remove_from_model()
self.metabolites -= metabolite_list
to_remove = [self.solver.constraints[m.id] for m in metabolite_list]
self.remove_cons_vars(to_remove)
context = get_context(self)
if context:
context(partial(self.metabolites.__iadd__, metabolite_list))
for x in metabolite_list:
context(partial(setattr, x, '_model', self))
|
Remove a list of metabolites from the object.
The change is reverted upon exit when using the model as a context.
Parameters
----------
metabolite_list : list
A list with `cobra.Metabolite` objects as elements.
destructive : bool
If False then the metabolite is removed from all
associated reactions. If True then all associated
reactions are removed from the Model.
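A hedged usage sketch with cobrapy; the bundled 'textbook' model and the h2o_c metabolite id are assumptions about the installed cobra version:
from cobra.io import load_model

model = load_model('textbook')
h2o = model.metabolites.get_by_id('h2o_c')
with model:                              # edits are reverted on exit
    model.remove_metabolites([h2o])
    print(h2o in model.metabolites)      # False inside the context
print(h2o in model.metabolites)          # True again after rollback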
|