| code | docstring |
|---|---|
def clean_course(self):
course_id = self.cleaned_data[self.Fields.COURSE].strip()
if not course_id:
return None
try:
client = EnrollmentApiClient()
return client.get_course_details(course_id)
except (HttpClientError, HttpServerError):
raise ValidationError(ValidationMessages.INVALID_COURSE_ID.format(course_id=course_id))
|
Verify course ID and retrieve course details.
|
def preprocess_constraints(ml, cl, n):
"Create a graph of constraints for both must- and cannot-links"
ml_graph, cl_graph = {}, {}
for i in range(n):
ml_graph[i] = set()
cl_graph[i] = set()
def add_both(d, i, j):
d[i].add(j)
d[j].add(i)
for (i, j) in ml:
ml_graph[i].add(j)
ml_graph[j].add(i)
for (i, j) in cl:
cl_graph[i].add(j)
cl_graph[j].add(i)
def dfs(i, graph, visited, component):
visited[i] = True
for j in graph[i]:
if not visited[j]:
dfs(j, graph, visited, component)
component.append(i)
visited = [False] * n
neighborhoods = []
for i in range(n):
if not visited[i] and ml_graph[i]:
component = []
dfs(i, ml_graph, visited, component)
for x1 in component:
for x2 in component:
if x1 != x2:
ml_graph[x1].add(x2)
neighborhoods.append(component)
for (i, j) in cl:
for x in ml_graph[i]:
add_both(cl_graph, x, j)
for y in ml_graph[j]:
add_both(cl_graph, i, y)
for x in ml_graph[i]:
for y in ml_graph[j]:
add_both(cl_graph, x, y)
for i in ml_graph:
for j in ml_graph[i]:
if j != i and j in cl_graph[i]:
raise InconsistentConstraintsException('Inconsistent constraints between {} and {}'.format(i, j))
return ml_graph, cl_graph, neighborhoods
|
Create a graph of constraints for both must- and cannot-links
|
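A brief usage sketch for the function above (illustrative values; the commented results follow the transitive closure described in the code):
ml = [(0, 1), (1, 2)]
cl = [(2, 3)]
ml_graph, cl_graph, neighborhoods = preprocess_constraints(ml, cl, n=5)
# ml_graph[0] == {1, 2}           (must-link closure over the chain 0-1-2)
# cl_graph[3] == {0, 1, 2}        (cannot-link to 2 propagated across its must-link component)
# neighborhoods == [[2, 1, 0]]    (one connected component; order depends on DFS traversal)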
def save_policy(self, path):
with open(path, 'wb') as f:
pickle.dump(self.policy, f)
|
Pickles the current policy for later inspection.
|
def transform(self, X):
X = check_array(X)
X_rbf = np.empty_like(X) if self.copy else X
X_in = X
if not self.squared:
np.power(X_in, 2, out=X_rbf)
X_in = X_rbf
if self.scale_by_median:
scale = self.median_ if self.squared else self.median_ ** 2
gamma = self.gamma * scale
else:
gamma = self.gamma
np.multiply(X_in, -gamma, out=X_rbf)
np.exp(X_rbf, out=X_rbf)
return X_rbf
|
Turns distances into RBF values.
Parameters
----------
X : array
The raw pairwise distances.
Returns
-------
X_rbf : array of same shape as X
The distances in X passed through the RBF kernel.
|
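Elementwise, the transform above computes exp(-gamma * d**2), with the median-based rescaling applied to gamma when enabled. A standalone NumPy sketch of the same math, independent of the class attributes:
import numpy as np
D = np.array([[0.0, 1.0], [2.0, 3.0]])   # raw (non-squared) pairwise distances
gamma = 0.5
K = np.exp(-gamma * D ** 2)              # same result as transform() with squared=False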
def create_rcontext(self, size, frame):
if self.format == 'pdf':
surface = cairo.PDFSurface(self._output_file(frame), *size)
elif self.format in ('ps', 'eps'):
surface = cairo.PSSurface(self._output_file(frame), *size)
elif self.format == 'svg':
surface = cairo.SVGSurface(self._output_file(frame), *size)
elif self.format == 'surface':
surface = self.target
else:
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, *size)
return cairo.Context(surface)
|
Called when CairoCanvas needs a cairo context to draw on
|
def load_by_pub_key(self, public_key):
data = self.get_data("account/keys/")
for jsoned in data['ssh_keys']:
if jsoned.get('public_key', "") == public_key:
self.id = jsoned['id']
self.load()
return self
return None
|
This method will load an SSHKey object from DigitalOcean
from a public_key. This method will avoid problems like
uploading the same public_key twice.
|
def parse_litezip(path):
struct = [parse_collection(path)]
struct.extend([parse_module(x) for x in path.iterdir()
if x.is_dir() and x.name.startswith('m')])
return tuple(sorted(struct))
|
Parse a litezip file structure to a data structure given the path
to the litezip directory.
|
def get_name():
if env.vm_type == EC2:
for instance in get_all_running_ec2_instances():
if env.host_string == instance.public_dns_name:
name = instance.tags.get(env.vm_name_tag)
return name
else:
raise NotImplementedError
|
Retrieves the instance name associated with the current host string.
|
def get_long_short_pos(positions):
pos_wo_cash = positions.drop('cash', axis=1)
longs = pos_wo_cash[pos_wo_cash > 0].sum(axis=1).fillna(0)
shorts = pos_wo_cash[pos_wo_cash < 0].sum(axis=1).fillna(0)
cash = positions.cash
net_liquidation = longs + shorts + cash
df_pos = pd.DataFrame({'long': longs.divide(net_liquidation, axis='index'),
'short': shorts.divide(net_liquidation,
axis='index')})
df_pos['net exposure'] = df_pos['long'] + df_pos['short']
return df_pos
|
Determines the long and short allocations in a portfolio.
Parameters
----------
positions : pd.DataFrame
The positions that the strategy takes over time.
Returns
-------
df_long_short : pd.DataFrame
Long and short allocations as a decimal
percentage of the total net liquidation
|
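A small synthetic call (column names are arbitrary apart from the required 'cash' column):
import pandas as pd
positions = pd.DataFrame(
{'AAPL': [100.0, 150.0], 'SPY': [-50.0, -25.0], 'cash': [50.0, 75.0]},
index=pd.date_range('2024-01-01', periods=2))
df_pos = get_long_short_pos(positions)
# df_pos['long'] and df_pos['short'] are fractions of net liquidation,
# e.g. 100 / (100 - 50 + 50) = 1.0 long exposure on the first day.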
def jhk_to_sdssz(jmag,hmag,kmag):
return convert_constants(jmag,hmag,kmag,
SDSSZ_JHK,
SDSSZ_JH, SDSSZ_JK, SDSSZ_HK,
SDSSZ_J, SDSSZ_H, SDSSZ_K)
|
Converts given J, H, Ks mags to an SDSS z magnitude value.
Parameters
----------
jmag,hmag,kmag : float
2MASS J, H, Ks mags of the object.
Returns
-------
float
The converted SDSS z band magnitude.
|
def _getClassifierRegion(self):
if (self._netInfo.net is not None and
"Classifier" in self._netInfo.net.regions):
return self._netInfo.net.regions["Classifier"]
else:
return None
|
Returns reference to the network's Classifier region
|
def unlink_user(self, enterprise_customer, user_email):
try:
existing_user = User.objects.get(email=user_email)
link_record = self.get(enterprise_customer=enterprise_customer, user_id=existing_user.id)
link_record.delete()
if update_user:
update_user.delay(
sailthru_vars={
'is_enterprise_learner': False,
'enterprise_name': None,
},
email=user_email
)
except User.DoesNotExist:
pending_link = PendingEnterpriseCustomerUser.objects.get(
enterprise_customer=enterprise_customer, user_email=user_email
)
pending_link.delete()
LOGGER.info(
'Enterprise learner {%s} successfully unlinked from Enterprise Customer {%s}',
user_email,
enterprise_customer.name
)
|
Unlink user email from Enterprise Customer.
If :class:`django.contrib.auth.models.User` instance with specified email does not exist,
:class:`.PendingEnterpriseCustomerUser` instance is deleted instead.
Raises EnterpriseCustomerUser.DoesNotExist if instance of :class:`django.contrib.auth.models.User` with
specified email exists and corresponding :class:`.EnterpriseCustomerUser` instance does not.
Raises PendingEnterpriseCustomerUser.DoesNotExist if an instance of
:class:`django.contrib.auth.models.User` with the specified email does not exist and the corresponding
:class:`.PendingEnterpriseCustomerUser` instance does not exist either.
|
def freeze(proto_dataset_uri):
proto_dataset = dtoolcore.ProtoDataSet.from_uri(
uri=proto_dataset_uri,
config_path=CONFIG_PATH
)
num_items = len(list(proto_dataset._identifiers()))
max_files_limit = int(dtoolcore.utils.get_config_value(
"DTOOL_MAX_FILES_LIMIT",
CONFIG_PATH,
10000
))
assert isinstance(max_files_limit, int)
if num_items > max_files_limit:
click.secho(
"Too many items ({} > {}) in proto dataset".format(
num_items,
max_files_limit
),
fg="red"
)
click.secho("1. Consider splitting the dataset into smaller datasets")
click.secho("2. Consider packaging small files using tar")
click.secho("3. Increase the limit using the DTOOL_MAX_FILES_LIMIT")
click.secho(" environment variable")
sys.exit(2)
handles = [h for h in proto_dataset._storage_broker.iter_item_handles()]
for h in handles:
if not valid_handle(h):
click.secho(
"Invalid item name: {}".format(h),
fg="red"
)
click.secho("1. Consider renaming the item")
click.secho("2. Consider removing the item")
sys.exit(3)
with click.progressbar(length=len(list(proto_dataset._identifiers())),
label="Generating manifest") as progressbar:
try:
proto_dataset.freeze(progressbar=progressbar)
except dtoolcore.storagebroker.DiskStorageBrokerValidationWarning as e:
click.secho("")
click.secho(str(e), fg="red", nl=False)
sys.exit(4)
click.secho("Dataset frozen ", nl=False, fg="green")
click.secho(proto_dataset_uri)
|
Convert a proto dataset into a dataset.
This step is carried out after all files have been added to the dataset.
Freezing a dataset finalizes it with a stamp marking it as frozen.
|
def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):
raise NotImplementedError("draw() is not implemented")
|
Draw function called by the system every frame when the effect is active.
Subclasses must override this method; the base implementation raises ``NotImplementedError``.
Args:
time (float): The current time in seconds.
frametime (float): The time the previous frame used to render in seconds.
target (``moderngl.Framebuffer``): The target FBO for the effect.
|
def data_to_tfrecord(images, labels, filename):
if os.path.isfile(filename):
print("%s exists" % filename)
return
print("Converting data into %s ..." % filename)
writer = tf.python_io.TFRecordWriter(filename)
for index, img in enumerate(images):
img_raw = img.tobytes()
label = int(labels[index])
example = tf.train.Example(
features=tf.train.Features(
feature={
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
}
)
)
writer.write(example.SerializeToString())
writer.close()
|
Save data into TFRecord.
|
def halfadder_gate(variables, vartype=dimod.BINARY, name='HALF_ADDER'):
variables = tuple(variables)
if vartype is dimod.BINARY:
configs = frozenset([(0, 0, 0, 0),
(0, 1, 1, 0),
(1, 0, 1, 0),
(1, 1, 0, 1)])
else:
configs = frozenset([(-1, -1, -1, -1),
(-1, +1, +1, -1),
(+1, -1, +1, -1),
(+1, +1, -1, +1)])
def func(augend, addend, sum_, carry):
total = (augend > 0) + (addend > 0)
if total == 0:
return (sum_ <= 0) and (carry <= 0)
elif total == 1:
return (sum_ > 0) and (carry <= 0)
elif total == 2:
return (sum_ <= 0) and (carry > 0)
else:
raise ValueError("func recieved unexpected values")
return Constraint(func, configs, variables, vartype=vartype, name=name)
|
Half adder.
Args:
variables (list): Variable labels for the half-adder gate as `[in1, in2, sum, carry]`,
where `in1, in2` are the inputs to be added and `sum` and `carry` are the resultant
outputs.
vartype (Vartype, optional, default='BINARY'): Variable type. Accepted
input values:
* Vartype.SPIN, 'SPIN', {-1, 1}
* Vartype.BINARY, 'BINARY', {0, 1}
name (str, optional, default='HALF_ADDER'): Name for the constraint.
Returns:
Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are
assigned values that match the valid states of a Boolean half adder.
Examples:
>>> import dwavebinarycsp
>>> import dwavebinarycsp.factories.constraint.gates as gates
>>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
>>> csp.add_constraint(gates.halfadder_gate(['a', 'b', 'total', 'carry'], name='HA1'))
>>> csp.check({'a': 1, 'b': 1, 'total': 0, 'carry': 1})
True
|
def regenerate_good_tokens(string):
toks = nltk.word_tokenize(string)
pos_string = nltk.pos_tag(toks)
pos_seq = [tag[1] for tag in pos_string]
pos_ngrams = ngrams(pos_seq, 2, 4)
sel_pos_ngrams = f7(pos_ngrams)
return sel_pos_ngrams
|
Given an input string, part of speech tags the string, then generates a list of
ngrams that appear in the string.
Used to define grammatically correct part of speech tag sequences.
Returns a list of part of speech tag sequences.
|
def fetch(self):
from ..iq import Iq
jid,node = self.address
iq = Iq(to_jid = jid, stanza_type = "get")
disco = self.disco_class(node)
iq.add_content(disco.xmlnode)
self.stream.set_response_handlers(iq,self.__response, self.__error,
self.__timeout)
self.stream.send(iq)
|
Initialize the Service Discovery process.
|
def convert_compound(mass, source, target, element):
target_mass_fraction = element_mass_fraction(target, element)
if target_mass_fraction == 0.0:
return 0.0
else:
source_mass_fraction = element_mass_fraction(source, element)
return mass * source_mass_fraction / target_mass_fraction
|
Convert the specified mass of the source compound to the target using
element as basis.
:param mass: Mass of the source compound. [kg]
:param source: Formula and phase of the original compound, e.g.
'Fe2O3[S1]'.
:param target: Formula and phase of the target compound, e.g. 'Fe[S1]'.
:param element: Element to use as basis for the conversion, e.g. 'Fe' or
'O'.
:returns: Mass of target. [kg]
|
def is_connected(self):
try:
self.exec_command(b"Query(ConnectionState)")
return self.status.connection_state.startswith(b"C(")
except NotConnectedException:
return False
|
Return bool indicating connection state
|
def load_class(path):
package, klass = path.rsplit('.', 1)
module = import_module(package)
return getattr(module, klass)
|
dynamically load a class given a string of the format
package.Class
|
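For example, the helper resolves a dotted path at runtime (assuming import_module is importlib.import_module, as the surrounding module presumably imports it):
OrderedDict = load_class('collections.OrderedDict')
d = OrderedDict(a=1)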
def compose_projects_json(projects, data):
projects = compose_git(projects, data)
projects = compose_mailing_lists(projects, data)
projects = compose_bugzilla(projects, data)
projects = compose_github(projects, data)
projects = compose_gerrit(projects)
projects = compose_mbox(projects)
return projects
|
Compose projects.json with all data sources
:param projects: projects.json
:param data: eclipse JSON
:return: projects.json with all data sources
|
def model_returns_t_alpha_beta(data, bmark, samples=2000, progressbar=True):
data_bmark = pd.concat([data, bmark], axis=1).dropna()
with pm.Model() as model:
sigma = pm.HalfCauchy(
'sigma',
beta=1)
nu = pm.Exponential('nu_minus_two', 1. / 10.)
X = data_bmark.iloc[:, 1]
y = data_bmark.iloc[:, 0]
alpha_reg = pm.Normal('alpha', mu=0, sd=.1)
beta_reg = pm.Normal('beta', mu=0, sd=1)
mu_reg = alpha_reg + beta_reg * X
pm.StudentT('returns',
nu=nu + 2,
mu=mu_reg,
sd=sigma,
observed=y)
trace = pm.sample(samples, progressbar=progressbar)
return model, trace
|
Run Bayesian alpha-beta-model with T distributed returns.
This model estimates intercept (alpha) and slope (beta) of two
return sets. Usually, these will be algorithm returns and
benchmark returns (e.g. S&P500). The data is assumed to be T
distributed and thus is robust to outliers and takes tail events
into account. If a pandas.DataFrame is passed as a benchmark, then
multiple linear regression is used to estimate alpha and beta.
Parameters
----------
data : pandas.Series
Series of simple returns of an algorithm or stock.
bmark : pandas.DataFrame
DataFrame of benchmark returns (e.g., S&P500) or risk factors (e.g.,
Fama-French SMB, HML, and UMD).
If bmark has more recent returns than data, these dates
will be treated as missing values and predictions will be
generated for them taking market correlations into account.
samples : int (optional)
Number of posterior samples to draw.
Returns
-------
model : pymc.Model object
PyMC3 model containing all random variables.
trace : pymc3.sampling.BaseTrace object
A PyMC3 trace object that contains samples for each parameter
of the posterior.
|
def _collapse_edge_by_namespace(graph: BELGraph,
victim_namespaces: Strings,
survivor_namespaces: str,
relations: Strings) -> None:
relation_filter = build_relation_predicate(relations)
source_namespace_filter = build_source_namespace_filter(victim_namespaces)
target_namespace_filter = build_target_namespace_filter(survivor_namespaces)
edge_predicates = [
relation_filter,
source_namespace_filter,
target_namespace_filter
]
_collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates)
|
Collapse pairs of nodes with the given namespaces that have the given relationship.
:param graph: A BEL Graph
:param victim_namespaces: The namespace(s) of the node to collapse
:param survivor_namespaces: The namespace of the node to keep
:param relations: The relation(s) to search
|
def get(self, key, default=None):
if key in self:
return self.__getitem__(key)
else:
return default
|
Return the value for the key if it exists, otherwise a default value.
:param str key: Key to look up
:param str default: Default value if key not present
|
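Usage mirrors dict.get on the mapping-like object (obj here stands for any instance of the class):
value = obj.get('missing-key', default='fallback')   # returns 'fallback' when the key is absent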
def _parse_string(self, xml):
if not isinstance(xml, HTMLElement):
xml = dhtmlparser.parseString(str(xml))
record = xml.find("record")
if not record:
raise ValueError("There is no <record> in your MARC XML document!")
record = record[0]
self.oai_marc = len(record.find("oai_marc")) > 0
if not self.oai_marc:
leader = record.find("leader")
if len(leader) >= 1:
self.leader = leader[0].getContent()
if self.oai_marc:
self._parse_control_fields(record.find("fixfield"), "id")
self._parse_data_fields(record.find("varfield"), "id", "label")
else:
self._parse_control_fields(record.find("controlfield"), "tag")
self._parse_data_fields(record.find("datafield"), "tag", "code")
if self.oai_marc and "LDR" in self.controlfields:
self.leader = self.controlfields["LDR"]
|
Parse MARC XML document to dicts, which are contained in
self.controlfields and self.datafields.
Args:
xml (str or HTMLElement): input data
Also detect if this is oai marc format or not (see self.oai_marc).
|
def sendmsg(self,
message,
recipient_mobiles=[],
url='http://services.ambientmobile.co.za/sms',
concatenate_message=True,
message_id=str(time()).replace(".", ""),
reply_path=None,
allow_duplicates=True,
allow_invalid_numbers=True,
):
if not recipient_mobiles or not(isinstance(recipient_mobiles, list) \
or isinstance(recipient_mobiles, tuple)):
raise AmbientSMSError("Missing recipients")
if not message or not len(message):
raise AmbientSMSError("Missing message")
postXMLList = []
postXMLList.append("<api-key>%s</api-key>" % self.api_key)
postXMLList.append("<password>%s</password>" % self.password)
postXMLList.append("<recipients>%s</recipients>" % \
"".join(["<mobile>%s</mobile>" % \
m for m in recipient_mobiles]))
postXMLList.append("<msg>%s</msg>" % message)
postXMLList.append("<concat>%s</concat>" % \
(1 if concatenate_message else 0))
postXMLList.append("<message_id>%s</message_id>" % message_id)
postXMLList.append("<allow_duplicates>%s</allow_duplicates>" % \
(1 if allow_duplicates else 0))
postXMLList.append(
"<allow_invalid_numbers>%s</allow_invalid_numbers>" % \
(1 if allow_invalid_numbers else 0)
)
if reply_path:
postXMLList.append("<reply_path>%s</reply_path>" % reply_path)
postXML = '<sms>%s</sms>' % "".join(postXMLList)
result = self.curl(url, postXML)
status = result.get("status", None)
if status and int(status) in [0, 1, 2]:
return result
else:
raise AmbientSMSError(int(status))
|
Send a message via the AmbientSMS API server
|
def add_tags(self, item, *tags):
try:
assert item["data"]["tags"]
except AssertionError:
item["data"]["tags"] = list()
for tag in tags:
item["data"]["tags"].append({"tag": "%s" % tag})
assert self.check_items([item])
return self.update_item(item)
|
Add one or more tags to a retrieved item,
then update it on the server
Accepts a dict, and one or more tags to add to it
Returns the updated item from the server
|
def estimate(self):
self.mul(300)
self.Cpig(300)
estimates = {'Tb': self.Tb(self.counts),
'Tm': self.Tm(self.counts),
'Tc': self.Tc(self.counts, self.Tb_estimated),
'Pc': self.Pc(self.counts, self.atom_count),
'Vc': self.Vc(self.counts),
'Hf': self.Hf(self.counts),
'Gf': self.Gf(self.counts),
'Hfus': self.Hfus(self.counts),
'Hvap': self.Hvap(self.counts),
'mul': self.mul,
'mul_coeffs': self.calculated_mul_coeffs,
'Cpig': self.Cpig,
'Cpig_coeffs': self.calculated_Cpig_coeffs}
return estimates
|
Method to compute all available properties with the Joback method;
returns their results as a dict. For the temperature dependent values
Cpig and mul, both the coefficients and objects to perform calculations
are returned.
|
def split_code_and_text_blocks(source_file):
docstring, rest_of_content = get_docstring_and_rest(source_file)
blocks = [('text', docstring)]
pattern = re.compile(
r'(?P<header_line>^#{20,}.*)\s(?P<text_content>(?:^#.*\s)*)',
flags=re.M)
pos_so_far = 0
for match in re.finditer(pattern, rest_of_content):
match_start_pos, match_end_pos = match.span()
code_block_content = rest_of_content[pos_so_far:match_start_pos]
text_content = match.group('text_content')
sub_pat = re.compile('^#', flags=re.M)
text_block_content = dedent(re.sub(sub_pat, '', text_content))
if code_block_content.strip():
blocks.append(('code', code_block_content))
if text_block_content.strip():
blocks.append(('text', text_block_content))
pos_so_far = match_end_pos
remaining_content = rest_of_content[pos_so_far:]
if remaining_content.strip():
blocks.append(('code', remaining_content))
return blocks
|
Return list with source file separated into code and text blocks.
Returns
-------
blocks : list of (label, content)
List where each element is a tuple with the label ('text' or 'code'),
and content string of block.
|
def derep_concat_split(data, sample, nthreads, force):
LOGGER.info("INSIDE derep %s", sample.name)
mergefile = os.path.join(data.dirs.edits, sample.name+"_merged_.fastq")
if not force:
if not os.path.exists(mergefile):
sample.files.edits = concat_multiple_edits(data, sample)
else:
LOGGER.info("skipped concat_multiple_edits: {} exists"\
.format(mergefile))
else:
sample.files.edits = concat_multiple_edits(data, sample)
if 'pair' in data.paramsdict['datatype']:
if "reference" in data.paramsdict["assembly_method"]:
nmerged = merge_pairs(data, sample.files.edits, mergefile, 0, 0)
else:
nmerged = merge_pairs(data, sample.files.edits, mergefile, 1, 1)
sample.files.edits = [(mergefile, )]
sample.stats.reads_merged = nmerged
if "3rad" in data.paramsdict["datatype"]:
declone_3rad(data, sample)
derep_and_sort(data,
os.path.join(data.dirs.edits, sample.name+"_declone.fastq"),
os.path.join(data.dirs.edits, sample.name+"_derep.fastq"),
nthreads)
else:
derep_and_sort(data,
sample.files.edits[0][0],
os.path.join(data.dirs.edits, sample.name+"_derep.fastq"),
nthreads)
|
Running on remote Engine. Refmaps, then merges, then dereplicates,
then denovo clusters reads.
|
def _get_filter_field(field_name, field_value):
filter_field = None
if isinstance(field_value, ValueRange):
range_values = {}
if field_value.lower:
range_values.update({"gte": field_value.lower_string})
if field_value.upper:
range_values.update({"lte": field_value.upper_string})
filter_field = {
"range": {
field_name: range_values
}
}
elif _is_iterable(field_value):
filter_field = {
"terms": {
field_name: field_value
}
}
else:
filter_field = {
"term": {
field_name: field_value
}
}
return filter_field
|
Return the filter clause for the field: a range query for a ValueRange value, a terms query for an iterable, otherwise a term match
|
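Roughly, the three branches emit standard Elasticsearch clauses (the ValueRange constructor shown is assumed for illustration; the real class may differ):
_get_filter_field('price', ValueRange(lower=10, upper=20))
# -> {'range': {'price': {'gte': '10', 'lte': '20'}}}   (strings come from lower_string/upper_string)
_get_filter_field('tag', ['a', 'b'])
# -> {'terms': {'tag': ['a', 'b']}}
_get_filter_field('status', 'active')
# -> {'term': {'status': 'active'}}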
def to_line_string(self, closed=True):
from imgaug.augmentables.lines import LineString
if not closed or len(self.exterior) <= 1:
return LineString(self.exterior, label=self.label)
return LineString(
np.concatenate([self.exterior, self.exterior[0:1, :]], axis=0),
label=self.label)
|
Convert this polygon's `exterior` to a ``LineString`` instance.
Parameters
----------
closed : bool, optional
Whether to close the line string, i.e. to add the first point of
the `exterior` also as the last point at the end of the line string.
This has no effect if the polygon has a single point or zero
points.
Returns
-------
imgaug.augmentables.lines.LineString
Exterior of the polygon as a line string.
|
def set_name(self, name):
if not self._campfire.get_user().admin:
return False
result = self._connection.put("room/%s" % self.id, {"room": {"name": name}})
if result["success"]:
self._load()
return result["success"]
|
Set the room name.
Args:
name (str): Name
Returns:
bool. Success
|
def base_url(self):
return '{proto}://{host}:{port}{url_path}'.format(
proto=self.protocol,
host=self.host,
port=self.port,
url_path=self.url_path,
)
|
A base_url that will be used to construct the final
URL we're going to query against.
:returns: A URL of the form: ``proto://host:port/url_path``.
:rtype: :obj:`string`
|
def stop_apps_or_services(app_or_service_names=None, rm_containers=False):
if app_or_service_names:
log_to_client("Stopping the following apps or services: {}".format(', '.join(app_or_service_names)))
else:
log_to_client("Stopping all running containers associated with Dusty")
compose.stop_running_services(app_or_service_names)
if rm_containers:
compose.rm_containers(app_or_service_names)
|
Stop any currently running Docker containers associated with
Dusty, or associated with the provided apps_or_services. Containers
are removed only when ``rm_containers`` is set.
|
def config_to_args(config):
result = []
for key, value in iteritems(config):
if value is False:
continue
key = '--{0}'.format(key.replace('_', '-'))
if isinstance(value, (list, set, tuple)):
for item in value:
result.extend((key, smart_str(item)))
elif value is not True:
result.extend((key, smart_str(value)))
else:
result.append(key)
return tuple(result)
|
Convert config dict to arguments list.
:param config: Configuration dict.
|
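For instance, assuming smart_str simply stringifies its argument:
config_to_args({'verbose': True, 'jobs': 4, 'skip': False, 'exclude': ['a', 'b']})
# -> ('--verbose', '--jobs', '4', '--exclude', 'a', '--exclude', 'b')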
def register_fetcher(self, object_class, fetcher_class):
self._lock.acquire()
try:
cache = self._caches.get(object_class)
if not cache:
cache = Cache(self.max_items, self.default_freshness_period,
self.default_expiration_period, self.default_purge_period)
self._caches[object_class] = cache
cache.set_fetcher(fetcher_class)
finally:
self._lock.release()
|
Register a fetcher class for an object class.
:Parameters:
- `object_class`: class to be retrieved by the fetcher.
- `fetcher_class`: the fetcher class.
:Types:
- `object_class`: `classobj`
- `fetcher_class`: `CacheFetcher` based class
|
def custom(cls, customgrouper):
if customgrouper is None:
raise TypeError("Argument to custom() must be ICustomGrouping instance or classpath")
if not isinstance(customgrouper, ICustomGrouping) and not isinstance(customgrouper, str):
raise TypeError("Argument to custom() must be ICustomGrouping instance or classpath")
serialized = default_serializer.serialize(customgrouper)
return cls.custom_serialized(serialized, is_java=False)
|
Custom grouping from a given implementation of ICustomGrouping
:param customgrouper: The ICustomGrouping implementation to use
|
def _validate_schema(obj):
if obj is not None and not isinstance(obj, Schema):
raise IncompatibleSchema('Schema must be of type {0}'.format(Schema))
return obj
|
Ensures the passed schema instance is compatible
:param obj: object to validate
:return: obj
:raises:
- IncompatibleSchema if the passed schema is of an incompatible type
|
def _printAvailableCheckpoints(experimentDir):
checkpointParentDir = getCheckpointParentDir(experimentDir)
if not os.path.exists(checkpointParentDir):
print "No available checkpoints."
return
checkpointDirs = [x for x in os.listdir(checkpointParentDir)
if _isCheckpointDir(os.path.join(checkpointParentDir, x))]
if not checkpointDirs:
print "No available checkpoints."
return
print "Available checkpoints:"
checkpointList = [_checkpointLabelFromCheckpointDir(x)
for x in checkpointDirs]
for checkpoint in sorted(checkpointList):
print "\t", checkpoint
print
print "To start from a checkpoint:"
print " python run_opf_experiment.py experiment --load <CHECKPOINT>"
print "For example, to start from the checkpoint \"MyCheckpoint\":"
print " python run_opf_experiment.py experiment --load MyCheckpoint"
|
List available checkpoints for the specified experiment.
|
def update_oai_info(self):
for field in record_get_field_instances(self.record, '909', ind1="C", ind2="O"):
new_subs = []
for tag, value in field[0]:
if tag == "o":
new_subs.append(("a", value))
else:
new_subs.append((tag, value))
if value in ["CERN", "CDS", "ForCDS"]:
self.tag_as_cern = True
record_add_field(self.record, '024', ind1="8", subfields=new_subs)
record_delete_fields(self.record, '909')
|
Move the 909 OAI info into an 024 field.
|
def process_configuration_success(self, stanza):
_unused = stanza
self.configured = True
self.handler.room_configured()
|
Process success response for a room configuration request.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Presence`
|
def _createPeriodicActivities(self):
periodicActivities = []
class MetricsReportCb(object):
def __init__(self, taskRunner):
self.__taskRunner = taskRunner
return
def __call__(self):
self.__taskRunner._getAndEmitExperimentMetrics()
reportMetrics = PeriodicActivityRequest(
repeating=True,
period=1000,
cb=MetricsReportCb(self))
periodicActivities.append(reportMetrics)
class IterationProgressCb(object):
PROGRESS_UPDATE_PERIOD_TICKS = 1000
def __init__(self, taskLabel, requestedIterationCount, logger):
self.__taskLabel = taskLabel
self.__requestedIterationCount = requestedIterationCount
self.__logger = logger
self.__numIterationsSoFar = 0
def __call__(self):
self.__numIterationsSoFar += self.PROGRESS_UPDATE_PERIOD_TICKS
self.__logger.debug("%s: ITERATION PROGRESS: %s of %s" % (
self.__taskLabel,
self.__numIterationsSoFar,
self.__requestedIterationCount))
iterationProgressCb = IterationProgressCb(
taskLabel=self.__task['taskLabel'],
requestedIterationCount=self.__task['iterationCount'],
logger=self.__logger)
iterationProgressReporter = PeriodicActivityRequest(
repeating=True,
period=IterationProgressCb.PROGRESS_UPDATE_PERIOD_TICKS,
cb=iterationProgressCb)
periodicActivities.append(iterationProgressReporter)
return periodicActivities
|
Creates and returns a list of activities for this TaskRunner instance
Returns: a list of PeriodicActivityRequest elements
|
def newcursor(self, dictcursor=False):
handle = hashlib.sha256(os.urandom(12)).hexdigest()
if dictcursor:
self.cursors[handle] = self.connection.cursor(
cursor_factory=psycopg2.extras.DictCursor
)
else:
self.cursors[handle] = self.connection.cursor()
return (self.cursors[handle], handle)
|
This creates a DB cursor for the current DB connection using a
randomly generated handle. Returns a tuple with cursor and handle.
Parameters
----------
dictcursor : bool
If True, returns a cursor where each returned row can be addressed
as a dictionary by column name.
Returns
-------
tuple
The tuple is of the form (cursor, handle), where cursor is a psycopg2 cursor instance.
|
def create(self):
params = {
"name": self.name,
"type": self.type,
"dns_names": self.dns_names,
"private_key": self.private_key,
"leaf_certificate": self.leaf_certificate,
"certificate_chain": self.certificate_chain
}
data = self.get_data("certificates/", type=POST, params=params)
if data:
self.id = data['certificate']['id']
self.not_after = data['certificate']['not_after']
self.sha1_fingerprint = data['certificate']['sha1_fingerprint']
self.created_at = data['certificate']['created_at']
self.type = data['certificate']['type']
self.dns_names = data['certificate']['dns_names']
self.state = data['certificate']['state']
return self
|
Create the Certificate
|
def _bundle_exists(self, path):
for attached_bundle in self._attached_bundles:
if path == attached_bundle.path:
return True
return False
|
Checks if a bundle exists at the provided path
:param path: Bundle path
:return: bool
|
def _validate_subnet_cidr(context, network_id, new_subnet_cidr):
if neutron_cfg.cfg.CONF.allow_overlapping_ips:
return
try:
new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])
except TypeError:
LOG.exception("Invalid or missing cidr: %s" % new_subnet_cidr)
raise n_exc.BadRequest(resource="subnet",
msg="Invalid or missing cidr")
filters = {
'network_id': network_id,
'shared': [False]
}
subnet_list = db_api.subnet_find(context=context.elevated(), **filters)
for subnet in subnet_list:
if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset):
err_msg = (_("Requested subnet with cidr: %(cidr)s for "
"network: %(network_id)s overlaps with another "
"subnet") %
{'cidr': new_subnet_cidr,
'network_id': network_id})
LOG.error(_("Validation for CIDR: %(new_cidr)s failed - "
"overlaps with subnet %(subnet_id)s "
"(CIDR: %(cidr)s)"),
{'new_cidr': new_subnet_cidr,
'subnet_id': subnet.id,
'cidr': subnet.cidr})
raise n_exc.InvalidInput(error_message=err_msg)
|
Validate the CIDR for a subnet.
Verifies the specified CIDR does not overlap with the ones defined
for the other subnets specified for this network, or with any other
CIDR if overlapping IPs are disabled.
|
def oauth2_callback(request):
if 'error' in request.GET:
reason = request.GET.get(
'error_description', request.GET.get('error', ''))
reason = html.escape(reason)
return http.HttpResponseBadRequest(
'Authorization failed {0}'.format(reason))
try:
encoded_state = request.GET['state']
code = request.GET['code']
except KeyError:
return http.HttpResponseBadRequest(
'Request missing state or authorization code')
try:
server_csrf = request.session[_CSRF_KEY]
except KeyError:
return http.HttpResponseBadRequest(
'No existing session for this flow.')
try:
state = json.loads(encoded_state)
client_csrf = state['csrf_token']
return_url = state['return_url']
except (ValueError, KeyError):
return http.HttpResponseBadRequest('Invalid state parameter.')
if client_csrf != server_csrf:
return http.HttpResponseBadRequest('Invalid CSRF token.')
flow = _get_flow_for_token(client_csrf, request)
if not flow:
return http.HttpResponseBadRequest('Missing Oauth2 flow.')
try:
credentials = flow.step2_exchange(code)
except client.FlowExchangeError as exchange_error:
return http.HttpResponseBadRequest(
'An error has occurred: {0}'.format(exchange_error))
get_storage(request).put(credentials)
signals.oauth2_authorized.send(sender=signals.oauth2_authorized,
request=request, credentials=credentials)
return shortcuts.redirect(return_url)
|
View that handles the user's return from OAuth2 provider.
This view verifies the CSRF state and OAuth authorization code, and on
success stores the credentials obtained in the storage provider,
and redirects to the return_url specified in the authorize view and
stored in the session.
Args:
request: Django request.
Returns:
A redirect response back to the return_url.
|
def is_action_available(self, action):
temp_state = np.rot90(self._state, action)
return self._is_action_available_left(temp_state)
|
Determines whether action is available.
That is, executing it would change the state.
|
def agitate(self):
for (varName, var) in self.permuteVars.iteritems():
var.agitate()
self.newPosition()
|
Agitate this particle so that it is likely to go to a new position.
Every time agitate is called, the particle is jiggled an even greater
amount.
Parameters:
--------------------------------------------------------------
retval: None
|
def get_object(cls, api_token, ssh_key_id):
ssh_key = cls(token=api_token, id=ssh_key_id)
ssh_key.load()
return ssh_key
|
Class method that will return a SSHKey object by ID.
|
def is_redirecting(path):
candidate = unipath(path, '.cpenv')
return os.path.exists(candidate) and os.path.isfile(candidate)
|
Returns True if path contains a .cpenv file
|
def is_text(self):
return self.type in [
self._TYPE_PASTE,
self._TYPE_TEXT,
self._TYPE_TWEET
]
|
Tells if this message is a text message.
Returns:
bool. True if this message is a text message.
|
def credentials(self):
ctx = _app_ctx_stack.top
if not hasattr(ctx, _CREDENTIALS_KEY):
ctx.google_oauth2_credentials = self.storage.get()
return ctx.google_oauth2_credentials
|
The credentials for the current user or None if unavailable.
|
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
arr_0to1 = arr_uint8.astype(np.float32) / 255.0
return HeatmapsOnImage.from_0to1(arr_0to1, shape, min_value=min_value, max_value=max_value)
|
Create a heatmaps object from a heatmap array containing values ranging from 0 to 255.
Parameters
----------
arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is height, ``W`` is width and ``C`` is the number of heatmap channels.
Expected dtype is uint8.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed. NOT the shape of the
heatmap(s) array, unless it is identical to the image shape (note the likely
difference between the arrays in the number of channels).
If there is not a corresponding image, use the shape of the heatmaps array.
min_value : float, optional
Minimum value for the heatmaps that the 0-to-255 array represents. This will usually
be 0.0. It is used when calling :func:`imgaug.HeatmapsOnImage.get_arr`, which converts the
underlying ``(0, 255)`` array to value range ``(min_value, max_value)``.
max_value : float, optional
Maximum value for the heatmaps that 0-to-255 array represents.
See parameter `min_value` for details.
Returns
-------
imgaug.HeatmapsOnImage
Heatmaps object.
|
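A short conversion sketch (assumes this is imgaug's HeatmapsOnImage; the values are illustrative):
import numpy as np
arr = np.zeros((64, 64), dtype=np.uint8)
arr[16:48, 16:48] = 255                      # bright square in the centre
heatmaps = HeatmapsOnImage.from_uint8(arr, shape=(128, 128, 3))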
def f_theta(cos_theta, zint, z, n2n1=0.95, sph6_ab=None, **kwargs):
wvfront = (np.outer(np.ones_like(z)*zint, cos_theta) -
np.outer(zint+z, csqrt(n2n1**2-1+cos_theta**2)))
if (sph6_ab is not None) and (not np.isnan(sph6_ab)):
sec2_theta = 1.0/(cos_theta*cos_theta)
wvfront += sph6_ab * (sec2_theta-1)*(sec2_theta-2)*cos_theta
if wvfront.dtype == np.dtype('complex128'):
wvfront.imag = -np.abs(wvfront.imag)
return wvfront
|
Returns the wavefront aberration for an aberrated, defocused lens.
Calculates the portions of the wavefront distortion due to z, theta
only, for a lens with defocus and spherical aberration induced by
coverslip mismatch. (The rho portion can be analytically integrated
to Bessels.)
Parameters
----------
cos_theta : numpy.ndarray.
The N values of cos(theta) at which to compute f_theta.
zint : Float
The position of the lens relative to the interface.
z : numpy.ndarray
The M z-values to compute f_theta at. `z.size` is unrelated
to `cos_theta.size`
n2n1: Float, optional
The ratio of the index of the immersed medium to the optics.
Default is 0.95
sph6_ab : Float or None, optional
Set sph6_ab to a nonzero value to add residual 6th-order
spherical aberration that is proportional to sph6_ab. Default
is None (i.e. doesn't calculate).
Returns
-------
wvfront : numpy.ndarray
The aberrated wavefront, as a function of theta and z.
Shape is [z.size, cos_theta.size]
|
def parse_time(block_time):
return datetime.strptime(block_time, timeFormat).replace(tzinfo=timezone.utc)
|
Take a string representation of time from the blockchain, and parse it
into datetime object.
|
def sample(field, inds=None, slicer=None, flat=True):
if inds is not None:
out = field.ravel()[inds]
elif slicer is not None:
out = field[slicer].ravel()
else:
out = field
if flat:
return out.ravel()
return out
|
Take a sample from a field given flat indices or a shaped slice
Parameters
-----------
inds : list of indices
One dimensional (raveled) indices to return from the field
slicer : slice object
A shaped (3D) slicer that returns a section of image
flat : boolean
Whether to flatten the sampled item before returning
|
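A quick NumPy illustration of the two sampling modes:
import numpy as np
field = np.arange(27).reshape(3, 3, 3)
sample(field, inds=[0, 4, 8])                # flat indices into the raveled field -> array([0, 4, 8])
sample(field, slicer=np.s_[0:2, 0:2, 0:2])   # shaped 3D slice, flattened because flat=True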
def send_message(self):
start = time.time()
message = None
if not self.initialized:
message = self.construct_start_message()
self.initialized = True
else:
message = self.construct_end_message()
self.send_UDP_message(message)
end = time.time()
return end - start
|
Send message over UDP.
If tracking is disabled, the bytes_sent will always be set to -1.
Returns:
float: the time taken to send the message, in seconds.
|
def publish(self, pid=None, id_=None):
pid = pid or self.pid
if not pid.is_registered():
raise PIDInvalidAction()
self['_deposit']['status'] = 'published'
if self['_deposit'].get('pid') is None:
self._publish_new(id_=id_)
else:
record = self._publish_edited()
record.commit()
self.commit()
return self
|
Publish a deposit.
If it's the first time:
* it calls the minter and set the following meta information inside
the deposit:
.. code-block:: python
deposit['_deposit'] = {
'type': pid_type,
'value': pid_value,
'revision_id': 0,
}
* A dump of all information inside the deposit is done.
* A snapshot of the files is done.
Otherwise, publish the new edited version.
In this case, if in the meanwhile someone already published a new
version, it'll try to merge the changes with the latest version.
.. note:: no need for indexing as it calls `self.commit()`.
Status required: ``'draft'``.
:param pid: Force the new pid value. (Default: ``None``)
:param id_: Force the new uuid value as deposit id. (Default: ``None``)
:returns: Returns itself.
|
def _as_dict(self, r):
d = dict()
for i, f in enumerate(self._field_names):
d[f] = r[i] if i < len(r) else None
return d
|
Convert the record to a dictionary using field names as keys.
|
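Conceptually the helper pads missing trailing fields with None; with hypothetical field names:
# self._field_names = ('id', 'name', 'email')
# self._as_dict(('1', 'alice')) -> {'id': '1', 'name': 'alice', 'email': None}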
def get_connection(module_name: str, connection: Optional[str] = None) -> str:
if connection is not None:
return connection
module_name = module_name.lower()
module_config_cls = get_module_config_cls(module_name)
module_config = module_config_cls.load()
return module_config.connection or config.connection
|
Return the SQLAlchemy connection string if it is set.
Order of operations:
1. Return the connection if given as a parameter
2. Check the environment for BIO2BEL_{module_name}_CONNECTION
3. Look in the bio2bel config file for module-specific connection. Create if doesn't exist. Check the
module-specific section for ``connection``
4. Look in the bio2bel module folder for a config file. Don't create if doesn't exist. Check the default section
for ``connection``
5. Check the environment for BIO2BEL_CONNECTION
6. Check the bio2bel config file for default
7. Fall back to standard default cache connection
:param module_name: The name of the module to get the configuration for
:param connection: An optional SQLAlchemy connection string; if given it is returned directly
:return: The SQLAlchemy connection string based on the configuration
|
def circular(cls, shape, pixel_scale, radius_arcsec, centre=(0., 0.), invert=False):
mask = mask_util.mask_circular_from_shape_pixel_scale_and_radius(shape, pixel_scale, radius_arcsec,
centre)
if invert: mask = np.invert(mask)
return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
|
Setup a mask where unmasked pixels are within a circle of an input arc second radius and centre.
Parameters
----------
shape: (int, int)
The (y,x) shape of the mask in units of pixels.
pixel_scale: float
The arc-second to pixel conversion factor of each pixel.
radius_arcsec : float
The radius (in arc seconds) of the circle within which pixels are unmasked.
centre: (float, float)
The centre of the circle used to mask pixels.
|
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int):
for x, y in zip(a, b):
result |= x ^ y
else:
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
|
This compares two values in constant time.
Taken from tornado:
https://github.com/tornadoweb/tornado/blob/
d4eb8eb4eb5cc9a6677e9116ef84ded8efba8859/tornado/web.py#L3060
|
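Typical use is comparing secrets without leaking timing information:
_time_independent_equals(b'secret-token', b'secret-token')   # True  (int path for Python 3 bytes)
_time_independent_equals('abc', 'abd')                       # False (ord path for str)
# The standard library's hmac.compare_digest provides the same constant-time guarantee.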
def logs(self, prefix='worker'):
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs
|
Generates a list of (key, value) pairs containing all collected statistics.
|
def count(self, *args, **kwargs):
search = self.create_search(*args, **kwargs)
try:
return search.count()
except NotFoundError:
print_error("The index was not found, have you initialized the index?")
except (ConnectionError, TransportError):
print_error("Cannot connect to elasticsearch")
|
Returns the number of results after filtering with the given arguments.
|
def generate(self):
tar_bytes = BytesIO()
tar = tarfile.open(fileobj=tar_bytes, mode='w')
self._generate_contents(tar)
self._process_files(tar)
tar.close()
tar_bytes.seek(0)
gzip_bytes = BytesIO()
gz = gzip.GzipFile(fileobj=gzip_bytes, mode='wb', mtime=0)
gz.write(tar_bytes.getvalue())
gz.close()
gzip_bytes.seek(0)
return gzip_bytes
|
Returns a ``BytesIO`` instance representing an in-memory tar.gz archive
containing the native router configuration.
:returns: in-memory tar.gz archive, instance of ``BytesIO``
|
def parent_callback(self, parent_fu):
if parent_fu.done() is True:
e = parent_fu._exception
if e:
super().set_exception(e)
else:
super().set_result(self.file_obj)
return
|
Callback from executor future to update the parent.
Args:
- parent_fu (Future): Future returned by the executor along with callback
Returns:
- None
Updates the super() with the result() or exception()
|
def get_prefix(self, include_version=True):
host = settings.host
if '://' not in host:
host = 'https://%s' % host.strip('/')
elif host.startswith('http://') and settings.verify_ssl:
raise exc.TowerCLIError(
'Can not verify ssl with non-https protocol. Change the '
'verify_ssl configuration setting to continue.'
)
url_pieces = urlparse(host)
if url_pieces[0] not in ['http', 'https']:
raise exc.ConnectionError('URL must be http(s), {} is not valid'.format(url_pieces[0]))
prefix = urljoin(host, '/api/')
if include_version:
prefix = urljoin(prefix, "{}/".format(CUR_API_VERSION))
return prefix
|
Return the appropriate URL prefix to prepend to requests,
based on the host provided in settings.
|
def sys_deallocate(self, cpu, addr, size):
logger.info("DEALLOCATE(0x%08x, %d)" % (addr, size))
if addr & 0xfff != 0:
logger.info("DEALLOCATE: addr is not page aligned")
return Decree.CGC_EINVAL
if size == 0:
logger.info("DEALLOCATE:length is zero")
return Decree.CGC_EINVAL
cpu.memory.munmap(addr, size)
self.syscall_trace.append(("_deallocate", -1, size))
return 0
|
deallocate - remove allocations
The deallocate system call deletes the allocations for the specified
address range, and causes further references to the addresses within the
range to generate invalid memory accesses. The region is also
automatically deallocated when the process is terminated.
The address addr must be a multiple of the page size. The length parameter
specifies the size of the region to be deallocated in bytes. All pages
containing a part of the indicated range are deallocated, and subsequent
references will terminate the process. It is not an error if the indicated
range does not contain any allocated pages.
The deallocate function is invoked through system call number 6.
:param cpu: current CPU
:param addr: the starting address to unmap.
:param size: the size of the portion to unmap.
:return: 0 on success.
EINVAL - addr is not page aligned.
EINVAL - length is zero.
EINVAL - any part of the region being deallocated is outside the valid
address range of the process.
|
def _sm_cleanup(self, *args, **kwargs):
if self._done_notification_func is not None:
self._done_notification_func()
self._timer.cancel()
|
Delete all state associated with the chaos session
|
def integral_approx_estimator(x, y):
a, b = (0., 0.)
x = np.array(x)
y = np.array(y)
idx, idy = (np.argsort(x), np.argsort(y))
for x1, x2, y1, y2 in zip(x[[idx]][:-1], x[[idx]][1:], y[[idx]][:-1], y[[idx]][1:]):
if x1 != x2 and y1 != y2:
a = a + np.log(np.abs((y2 - y1) / (x2 - x1)))
for x1, x2, y1, y2 in zip(x[[idy]][:-1], x[[idy]][1:], y[[idy]][:-1], y[[idy]][1:]):
if x1 != x2 and y1 != y2:
b = b + np.log(np.abs((x2 - x1) / (y2 - y1)))
return (a - b)/len(x)
|
Integral approximation estimator for causal inference.
:param x: input variable x 1D
:param y: input variable y 1D
:return: IGCI model score; > 0 suggests x -> y, < 0 suggests y -> x.
|
def evaluate(self, repo, spec, args):
status = []
if len(spec['files']) == 0:
return status
with cd(repo.rootdir):
rules = None
if 'rules-files' in spec and len(spec['rules-files']) > 0:
rulesfiles = spec['rules-files']
rules = {}
for f in rulesfiles:
d = json.loads(open(f).read())
rules.update(d)
elif 'rules' in spec:
rules = {
'inline': spec['rules']
}
if rules is None or len(rules) == 0:
print("Regression quality validation has been enabled but no rules file has been specified")
print("Example: { 'min-r2': 0.25 }. Put this either in file or in dgit.json")
raise InvalidParameters("Regression quality checking rules missing")
files = dict([(f, open(f).read()) for f in spec['files']])
for r in rules:
if 'min-r2' not in rules[r]:
continue
minr2 = float(rules[r]['min-r2'])
for f in files:
match = re.search(r"R-squared:\s+(\d.\d+)", files[f])
if match is None:
status.append({
'target': f,
'validator': self.name,
'description': self.description,
'rules': r,
'status': "ERROR",
'message': "Invalid model output"
})
else:
r2 = match.group(1)
r2 = float(r2)
if r2 > minr2:
status.append({
'target': f,
'validator': self.name,
'description': self.description,
'rules': r,
'status': "OK",
'message': "Acceptable R2"
})
else:
status.append({
'target': f,
'validator': self.name,
'description': self.description,
'rules': r,
'status': "ERROR",
'message': "R2 is too low"
})
return status
|
Evaluate the files identified for checksum.
|
def swap_buffers(self):
self.frames += 1
if self.headless_frames and self.frames >= self.headless_frames:
self.close()
|
Headless windows currently don't support double buffering.
We only increment the frame counter here.
|
def spend_key(self):
key = self._backend.spend_key()
if key == numbers.EMPTY_KEY:
return None
return key
|
Returns private spend key. None if wallet is view-only.
:rtype: str or None
|
def put(self, task, *args, **kwargs):
if not self.isopen:
logger = logging.getLogger(__name__)
logger.warning('the drop box is not open')
return
package = TaskPackage(task=task, args=args, kwargs=kwargs)
return self.dropbox.put(package)
|
put a task and its arguments
If you need to put multiple tasks, it can be faster to put
multiple tasks with `put_multiple()` than to use this method
multiple times.
Parameters
----------
task : a function
A function to be executed
args : list
A list of positional arguments to the `task`
kwargs : dict
A dict with keyword arguments to the `task`
Returns
-------
int, str, or any hashable and sortable
A task ID. IDs are sortable in the order in which the
corresponding tasks are put.
|
def get_traffic_meter(self):
_LOGGER.info("Get traffic meter")
def parse_text(text):
def tofloats(lst): return (float(t) for t in lst)
try:
if "/" in text:
return tuple(tofloats(text.split('/')))
elif ":" in text:
hour, mins = tofloats(text.split(':'))
return timedelta(hours=hour, minutes=mins)
else:
return float(text)
except ValueError:
return None
success, response = self._make_request(SERVICE_DEVICE_CONFIG,
"GetTrafficMeterStatistics")
if not success:
return None
success, node = _find_node(
response.text,
".//GetTrafficMeterStatisticsResponse")
if not success:
return None
return {t.tag: parse_text(t.text) for t in node}
|
Return dict of traffic meter stats.
Returns None if error occurred.
|
def console_output(self, instance=None):
if instance is None:
instance = self.instance()
for stage in instance['stages']:
for job in stage['jobs']:
if job['result'] not in self.final_results:
continue
artifact = self.artifact(
instance['counter'],
stage['name'],
job['name'],
stage['counter']
)
output = artifact.get('cruise-output/console.log')
yield (
{
'pipeline': self.name,
'pipeline_counter': instance['counter'],
'stage': stage['name'],
'stage_counter': stage['counter'],
'job': job['name'],
'job_result': job['result'],
},
output.body
)
|
Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipeline
- pipeline_counter
- stage
- stage_counter
- job
- job_result
|
def T_dependent_property_integral(self, T1, T2):
Tavg = 0.5*(T1+T2)
if self.method:
if self.test_method_validity(Tavg, self.method):
try:
return self.calculate_integral(T1, T2, self.method)
except:
pass
sorted_valid_methods = self.select_valid_methods(Tavg)
for method in sorted_valid_methods:
try:
return self.calculate_integral(T1, T2, method)
except:
pass
return None
|
Method to calculate the integral of a property with respect to
temperature, using a specified method. Methods found valid by
`select_valid_methods` are attempted until a method succeeds. If no
methods are valid and succeed, None is returned.
Calls `calculate_integral` internally to perform the actual
calculation.
.. math::
\text{integral} = \int_{T_1}^{T_2} \text{property} \; dT
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`]
|
def validate(method):
name_error = 'configuration option "{}" is not supported'
@functools.wraps(method)
def validator(self, name, *args):
if name not in self.allowed_opts:
raise ValueError(name_error.format(name))
return method(self, name, *args)
return validator
|
Config option name value validator decorator.
|
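A sketch of how the decorator might be applied (the Config class and its allowed_opts are hypothetical):
class Config:
    allowed_opts = {'timeout', 'retries'}
    @validate
    def set(self, name, value):
        setattr(self, name, value)
cfg = Config()
cfg.set('timeout', 30)      # ok
cfg.set('colour', 'red')    # ValueError: configuration option "colour" is not supported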
def _usage(prog_name=os.path.basename(sys.argv[0])):
spacer = ' ' * len('usage: ')
usage = prog_name + ' -b LIST [-S SEPARATOR] [file ...]\n' \
+ spacer + prog_name + ' -c LIST [-S SEPARATOR] [file ...]\n' \
+ spacer + prog_name \
+ ' -f LIST [-d DELIM] [-e] [-S SEPARATOR] [-s] [file ...]'
return "usage: " + usage.rstrip()
|
Returns usage string with no trailing whitespace.
|
def compat_convertHashedIndexes(self, objs, conn=None):
if conn is None:
conn = self._get_connection()
fields = []
for indexedField in self.indexedFields:
origField = self.fields[indexedField]
if 'hashIndex' not in origField.__class__.__new__.__code__.co_varnames:
continue
if indexedField.hashIndex is True:
hashingField = origField
regField = origField.copy()
regField.hashIndex = False
else:
regField = origField
hashingField = origField.copy()
hashingField.hashIndex = True
fields.append ( (origField, regField, hashingField) )
objDicts = [obj.asDict(True, forStorage=True) for obj in objs]
for objDict in objDicts:
pipeline = conn.pipeline()
pk = objDict['_id']
for origField, regField, hashingField in fields:
val = objDict[indexedField]
self._rem_id_from_index(regField, pk, val, pipeline)
self._rem_id_from_index(hashingField, pk, val, pipeline)
self._add_id_to_index(origField, pk, val, pipeline)
pipeline.execute()
|
compat_convertHashedIndexes - Reindex all fields for the provided objects, where the field value is hashed or not.
If the field is unhashable, do not allow.
NOTE: This works one object at a time. It is intended to be used while your application is offline,
as it doesn't make sense to be changing your model while applications are actively using it.
@param objs <IndexedRedisModel objects to convert>
@param conn <redis.Redis or None> - Specific Redis connection or None to reuse.
|
def delete_switch(apps, schema_editor):
Switch = apps.get_model('waffle', 'Switch')
Switch.objects.filter(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH).delete()
|
Delete the `role_based_access_control` switch.
|
def execute_nonstop_tasks(self, tasks_cls):
self.execute_batch_tasks(tasks_cls,
self.conf['sortinghat']['sleep_for'],
self.conf['general']['min_update_delay'], False)
|
Just a wrapper to the execute_batch_tasks method
|
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
|
Reset the parameters.
|
def strip_codes(s: Any) -> str:
return codepat.sub('', str(s) if (s or (s == 0)) else '')
|
Strip all color codes from a string.
Returns empty string for "falsey" inputs.
|
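A minimal sketch, assuming codepat is a compiled regex for ANSI escape codes (the real pattern is defined elsewhere in the module; this simplified one is for illustration only):
import re
codepat = re.compile('\033\\[[\\d;]*m')          # simplified ANSI SGR pattern
strip_codes('\033[31mred text\033[0m')           # -> 'red text'
strip_codes(None)                                # -> ''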
def seek(self, relative_position):
self._player_interface.Seek(Int64(1000.0 * 1000 * relative_position))
self.seekEvent(self, relative_position)
|
Seek the video by `relative_position` seconds
Args:
relative_position (float): The number of seconds to seek relative to the current position.
|
def merge_with_published(self):
pid, first = self.fetch_published()
lca = first.revisions[self['_deposit']['pid']['revision_id']]
args = [lca.dumps(), first.dumps(), self.dumps()]
for arg in args:
del arg['$schema'], arg['_deposit']
args.append({})
m = Merger(*args)
try:
m.run()
except UnresolvedConflictsException:
raise MergeConflict()
return patch(m.unified_patches, lca)
|
Merge changes with latest published version.
|
def parse_unique_urlencoded(content):
urlencoded_params = urllib.parse.parse_qs(content)
params = {}
for key, value in six.iteritems(urlencoded_params):
if len(value) != 1:
msg = ('URL-encoded content contains a repeated value:'
'%s -> %s' % (key, ', '.join(value)))
raise ValueError(msg)
params[key] = value[0]
return params
|
Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated.
|
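For example:
parse_unique_urlencoded('scope=email&access_type=offline')
# -> {'scope': 'email', 'access_type': 'offline'}
parse_unique_urlencoded('a=1&a=2')   # raises ValueError because key 'a' is repeated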
def _issue_cert(self, domain):
def errback(failure):
failure.trap(txacme_ServerError)
acme_error = failure.value.message
if acme_error.code in ['rateLimited', 'serverInternal',
'connection', 'unknownHost']:
self.log.error(
'Error ({code}) issuing certificate for "{domain}": '
'{detail}', code=acme_error.code, domain=domain,
detail=acme_error.detail)
else:
return failure
d = self.txacme_service.issue_cert(domain)
return d.addErrback(errback)
|
Issue a certificate for the given domain.
|
def get_all_locations(self, timeout: int=None):
url = self.api.LOCATIONS
return self._get_model(url, timeout=timeout)
|
Get a list of all locations
Parameters
----------
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
|
def set_fan_power(self, power):
if power > 255:
raise ValueError("The fan power should be a single byte (0-255).")
a = self.cnxn.xfer([0x42])[0]
sleep(10e-3)
b = self.cnxn.xfer([0x00])[0]
c = self.cnxn.xfer([power])[0]
sleep(0.1)
return True if a == 0xF3 and b == 0x42 and c == 0x00 else False
|
Set only the Fan power.
:param power: Fan power value as an integer between 0-255.
:type power: int
:rtype: boolean
:Example:
>>> alpha.set_fan_power(255)
True
|
def transform_launch_points(self, content_metadata_item):
return [{
'providerID': self.enterprise_configuration.provider_id,
'launchURL': content_metadata_item['enrollment_url'],
'contentTitle': content_metadata_item['title'],
'contentID': self.get_content_id(content_metadata_item),
'launchType': 3,
'mobileEnabled': True,
'mobileLaunchURL': content_metadata_item['enrollment_url'],
}]
|
Return the content metadata item launch points.
SAPSF allows you to transmit an arry of content launch points which
are meant to represent sections of a content item which a learner can
launch into from SAPSF. Currently, we only provide a single launch
point for a content item.
|
def getScript(self, scriptname):
script = self.description.get('scripts', {}).get(scriptname, None)
if script is not None:
if isinstance(script, str) or isinstance(script, type(u'unicode string')):
import shlex
script = shlex.split(script)
if len(script) and script[0].lower().endswith('.py'):
if not os.path.isabs(script[0]):
absscript = os.path.abspath(os.path.join(self.path, script[0]))
logger.debug('rewriting script %s to be absolute path %s', script[0], absscript)
script[0] = absscript
import sys
script = [sys.executable] + script
return script
|
Return the specified script command. If the first part of the
command is a .py file, then the current python interpreter is
prepended.
If the script is a single string, rather than an array, it is
shlex-split.
|
def render_to_string(template, extra=None):
from jinja2 import Template
extra = extra or {}
final_fqfn = find_template(template)
assert final_fqfn, 'Template not found: %s' % template
template_content = open(final_fqfn, 'r').read()
t = Template(template_content)
if extra:
context = env.copy()
context.update(extra)
else:
context = env
rendered_content = t.render(**context)
rendered_content = rendered_content.replace('&quot;', '"')
return rendered_content
|
Renders the given template to a string.
|
def get_key(key_name,
value_name,
jsonify,
no_decrypt,
stash,
passphrase,
backend):
if value_name and no_decrypt:
sys.exit('VALUE_NAME cannot be used in conjuction with --no-decrypt')
stash = _get_stash(backend, stash, passphrase, quiet=jsonify or value_name)
try:
key = stash.get(key_name=key_name, decrypt=not no_decrypt)
except GhostError as ex:
sys.exit(ex)
if not key:
sys.exit('Key `{0}` not found'.format(key_name))
if value_name:
key = key['value'].get(value_name)
if not key:
sys.exit(
'Value name `{0}` could not be found under key `{1}`'.format(
value_name, key_name))
if jsonify or value_name:
click.echo(
json.dumps(key, indent=4, sort_keys=False).strip('"'),
nl=True)
else:
click.echo('Retrieving key...')
click.echo('\n' + _prettify_dict(key))
|
Retrieve a key from the stash
\b
`KEY_NAME` is the name of the key to retrieve
`VALUE_NAME` is a single value to retrieve e.g. if the value
of the key `test` is `a=b,b=c`, `ghost get test a` will return
`b`
|
def get_days_span(self, month_index):
is_first_month = month_index == 0
is_last_month = month_index == self.__len__() - 1
y = int(self.start_date.year + (self.start_date.month + month_index) / 13)
m = int((self.start_date.month + month_index) % 12 or 12)
total = calendar.monthrange(y, m)[1]
if is_first_month and is_last_month:
return (self.end_date - self.start_date).days + 1
else:
if is_first_month:
return total - self.start_date.day + 1
elif is_last_month:
return self.end_date.day
else:
return total
|
Calculate how many days the month spans.
|
def get_new_apikey(lcc_server):
USERHOME = os.path.expanduser('~')
APIKEYFILE = os.path.join(USERHOME,
'.astrobase',
'lccs',
'apikey-%s' % lcc_server.replace(
'https://',
'https-'
).replace(
'http://',
'http-'
))
url = '%s/api/key' % lcc_server
resp = urlopen(url)
if resp.code == 200:
respdict = json.loads(resp.read())
else:
LOGERROR('could not fetch the API key from LCC-Server at: %s' %
lcc_server)
LOGERROR('the HTTP status code was: %s' % resp.code)
return None
apikey = respdict['result']['apikey']
expires = respdict['result']['expires']
if not os.path.exists(os.path.dirname(APIKEYFILE)):
os.makedirs(os.path.dirname(APIKEYFILE))
with open(APIKEYFILE,'w') as outfd:
outfd.write('%s %s\n' % (apikey, expires))
os.chmod(APIKEYFILE, 0o100600)
LOGINFO('key fetched successfully from: %s. expires on: %s' % (lcc_server,
expires))
LOGINFO('written to: %s' % APIKEYFILE)
return apikey, expires
|
This gets a new API key from the specified LCC-Server.
NOTE: this only gets an anonymous API key. To get an API key tied to a user
account (and associated privilege level), see the `import_apikey` function
below.
Parameters
----------
lcc_server : str
The base URL of the LCC-Server from where the API key will be fetched.
Returns
-------
(apikey, expiry) : tuple
This returns a tuple with the API key and its expiry date.
|
def inject_method(self, func, name=None):
new_method = func.__get__(self, self.__class__)
if name is None:
name = func.__name__
setattr(self, name, new_method)
|
Injects a function into an object instance as a bound method
The main use case of this function is for monkey patching. While monkey
patching is sometimes necessary it should generally be avoided. Thus, we
simply remind the developer that there might be a better way.
Args:
self (object): instance to inject a function into
func (func): the function to inject (must contain an arg for self)
name (str): name of the method. optional. If not specified the name
of the function is used.
Example:
>>> class Foo(object):
>>> def bar(self):
>>> return 'bar'
>>> def baz(self):
>>> return 'baz'
>>> self = Foo()
>>> assert self.bar() == 'bar'
>>> assert not hasattr(self, 'baz')
>>> inject_method(self, baz)
>>> assert not hasattr(Foo, 'baz'), 'should only change one instance'
>>> assert self.baz() == 'baz'
>>> inject_method(self, baz, 'bar')
>>> assert self.bar() == 'baz'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.