| code | docstring |
|---|---|
def export_deleted_fields(self):
result = []
if self.__modified_data__ is not None:
return result
for index, item in enumerate(self):
try:
deleted_fields = item.export_deleted_fields()
result.extend(['{}.{}'.format(index, key) for key in deleted_fields])
except AttributeError:
pass
return result
|
Returns a list with any deleted fields from original data.
In tree models, deleted fields on children will be appended.
|
def download(self, uri, file_path):
with open(file_path, 'wb') as file:
return self._connection.download_to_stream(file, uri)
|
Downloads the contents of the requested URI to a stream.
Args:
uri: URI
file_path: File path destination
Returns:
bool: Indicates if the file was successfully downloaded.
|
def terminate(pid, sig, timeout):
os.kill(pid, sig)
start = time.time()
while True:
try:
_, status = os.waitpid(pid, os.WNOHANG)
except OSError as exc:
if exc.errno != errno.ECHILD:
raise
else:
if status:
return True
if not is_running(pid):
return True
if time.time()-start>=timeout:
return False
time.sleep(0.1)
|
Terminates process with PID `pid` and returns True if process finished
during `timeout`. Current user must have permission to access process
information.
|
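A minimal usage sketch for `terminate` above, assuming a POSIX system and that the `is_running` helper from the same module is available; the `sleep` child process is just a throwaway target:

```python
import os
import signal
import subprocess

# Spawn a throwaway child process to act as the target.
child = subprocess.Popen(["sleep", "60"])

# Ask it to exit with SIGTERM and give it a 5-second grace period.
finished = terminate(child.pid, signal.SIGTERM, timeout=5)
if not finished:
    # Escalate if the process ignored SIGTERM within the timeout.
    os.kill(child.pid, signal.SIGKILL)
```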
def _find_short_paths(self, paths):
path_parts_s = [path.split(os.path.sep) for path in paths]
root_node = {}
for parts in sorted(path_parts_s, key=len, reverse=True):
node = root_node
for part in parts:
node = node.setdefault(part, {})
node.clear()
short_path_s = set()
self._collect_leaf_paths(
node=root_node,
path_parts=(),
leaf_paths=short_path_s,
)
return short_path_s
|
Find short paths of given paths.
E.g. if both `/home` and `/home/aoik` exist, only keep `/home`.
:param paths:
Paths.
:return:
Set of short paths.
|
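The method works by inserting path components into a trie (longest paths first) and keeping only the leaf prefixes. A self-contained sketch of the same idea, written as a plain function rather than the class method above (its `_collect_leaf_paths` helper is inlined here):

```python
import os

def find_short_paths(paths):
    # Build a trie of path components; longer paths go in first so that a
    # shorter prefix path clears (absorbs) all of its descendants.
    root = {}
    for parts in sorted((p.split(os.path.sep) for p in paths), key=len, reverse=True):
        node = root
        for part in parts:
            node = node.setdefault(part, {})
        node.clear()

    # The leaves of the trie are exactly the short paths.
    short = set()

    def collect(node, prefix):
        if not node:
            short.add(os.path.sep.join(prefix))
            return
        for part, child in node.items():
            collect(child, prefix + (part,))

    collect(root, ())
    return short

print(find_short_paths(['/home', '/home/aoik', '/tmp/x']))  # {'/home', '/tmp/x'} on POSIX
```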
def _advance_window(self):
x_to_remove, y_to_remove = self._x_in_window[0], self._y_in_window[0]
self._window_bound_lower += 1
self._update_values_in_window()
x_to_add, y_to_add = self._x_in_window[-1], self._y_in_window[-1]
self._remove_observation(x_to_remove, y_to_remove)
self._add_observation(x_to_add, y_to_add)
|
Advances the window by one observation, updating the values in the current window and the current window means and variances.
|
def users_changed_handler(stream):
while True:
yield from stream.get()
users = [
{'username': username, 'uuid': uuid_str}
for username, uuid_str in ws_connections.values()
]
packet = {
'type': 'users-changed',
'value': sorted(users, key=lambda i: i['username'])
}
logger.debug(packet)
yield from fanout_message(ws_connections.keys(), packet)
|
Sends connected clients the list of currently active users in the chatroom
|
def e_164(msisdn: str) -> str:
number = phonenumbers.parse("+{}".format(msisdn.lstrip("+")), None)
return phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164)
|
Returns the msisdn in E.164 international format.
|
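A quick usage sketch for `e_164`, assuming the `phonenumbers` package is installed; the number is a made-up US example:

```python
# The leading '+' is stripped and re-added, so both forms are accepted.
print(e_164("14155552671"))   # -> '+14155552671'
print(e_164("+14155552671"))  # -> '+14155552671'
```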
def write(self):
html = self.render()
if self.file_type == 'pdf':
self.write_pdf(html)
else:
with codecs.open(self.destination_file, 'w',
encoding='utf_8') as outfile:
outfile.write(html)
|
Writes generated presentation code into the destination file.
|
def import_data_object_to_graph(diagram_graph, process_id, process_attributes, data_object_element):
BpmnDiagramGraphImport.import_flow_node_to_graph(diagram_graph, process_id, process_attributes,
data_object_element)
data_object_id = data_object_element.getAttribute(consts.Consts.id)
diagram_graph.node[data_object_id][consts.Consts.is_collection] = \
data_object_element.getAttribute(consts.Consts.is_collection) \
if data_object_element.hasAttribute(consts.Consts.is_collection) else "false"
|
Adds to graph the new element that represents BPMN data object.
Data object inherits attributes from FlowNode. In addition, an attribute 'isCollection' is added to the node.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param data_object_element: object representing a BPMN XML 'dataObject' element.
|
def from_string(string_data, file_format="xyz"):
mols = pb.readstring(str(file_format), str(string_data))
return BabelMolAdaptor(mols.OBMol)
|
Uses OpenBabel to read a molecule from a string in all supported
formats.
Args:
string_data: String containing molecule data.
file_format: String specifying any OpenBabel supported formats.
Returns:
BabelMolAdaptor object
|
def restore_taskset(self, taskset_id):
try:
return self.get(taskset_id=taskset_id)
except self.model.DoesNotExist:
pass
|
Get the async result instance by taskset id.
|
def _run_queries(self, agent_strs, stmt_types, params, persist):
self._query_over_statement_types(agent_strs, stmt_types, params)
assert len(self.__done_dict) == len(stmt_types) \
or None in self.__done_dict.keys(), \
"Done dict was not initiated for all stmt_type's."
if not persist:
self._compile_statements()
return
while not self._all_done():
self._query_over_statement_types(agent_strs, stmt_types, params)
self._compile_statements()
return
|
Use paging to get all statements requested.
|
def get_resourceprovider_logger(name=None, short_name=" ", log_to_file=True):
global LOGGERS
loggername = name
logger = _check_existing_logger(loggername, short_name)
if logger is not None:
return logger
logger_config = LOGGING_CONFIG.get(name, DEFAULT_LOGGING_CONFIG)
logger = _get_basic_logger(loggername, log_to_file, get_base_logfilename(loggername + ".log"))
cbh = logging.StreamHandler()
cbh.formatter = BenchFormatterWithType(COLOR_ON)
if VERBOSE_LEVEL > 0 and not SILENT_ON:
cbh.setLevel(logging.DEBUG)
elif SILENT_ON:
cbh.setLevel(logging.WARN)
else:
cbh.setLevel(getattr(logging, logger_config.get("level")))
logger.addHandler(cbh)
LOGGERS[loggername] = BenchLoggerAdapter(logger, {"source": short_name})
return LOGGERS[loggername]
|
Get a logger for ResourceProvider and its components, such as Allocators.
:param name: Name for logger
:param short_name: Shorthand name for the logger
:param log_to_file: Boolean, True if logger should log to a file as well.
:return: Logger
|
def get_state_vector_sampler(n_sample):
def sampling_by_measurement(circuit, meas):
val = 0.0
e = expect(circuit.run(returns="statevector"), meas)
bits, probs = zip(*e.items())
dists = np.random.multinomial(n_sample, probs) / n_sample
return dict(zip(tuple(bits), dists))
return sampling_by_measurement
|
Returns a function which gets the expectations by sampling the state vector
|
def exclude_functions(self, *funcs):
for f in funcs:
f.exclude = True
run_time_s = sum(0 if s.exclude else s.own_time_s for s in self.stats)
cProfileFuncStat.run_time_s = run_time_s
|
Excludes the contributions from the given functions.
|
def doAction(self, loginMethod, actionClass):
loginAccount = loginMethod.account
return actionClass(
self,
loginMethod.localpart + u'@' + loginMethod.domain,
loginAccount)
|
Show the form for the requested action.
|
def now(self):
try:
if self.now_id:
return Chart(self.now_id)
else:
log.debug('attempted to get current chart, but none was found')
return
except AttributeError:
log.debug('attempted to get current ("now") chart from a chart without a now attribute')
return None
|
fetch the chart identified by this chart's now_id attribute
if the now_id is either null or not present for this chart return None
returns the new chart instance on success
|
def _compute_nonlinear_magnitude_term(self, C, mag):
return self._compute_linear_magnitude_term(C, mag) +\
C["b3"] * ((mag - 7.0) ** 2.)
|
Computes the non-linear magnitude term
|
async def wait(self):
while True:
await self.eio.wait()
await self.sleep(1)
if not self._reconnect_task:
break
await self._reconnect_task
if self.eio.state != 'connected':
break
|
Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
Note: this method is a coroutine.
|
def _get_spec(self) -> dict:
if self.spec:
return self.spec
self.spec = requests.get(self.SPEC_URL.format(self.version)).json()
return self.spec
|
Fetches the OpenAPI spec from the server.
If the spec has already been fetched, the cached version is returned instead.
Args:
None
Returns:
OpenAPI spec data
|
def _get_runner(classpath, main, jvm_options, args, executor,
cwd, distribution,
create_synthetic_jar, synthetic_jar_dir):
executor = executor or SubprocessExecutor(distribution)
safe_cp = classpath
if create_synthetic_jar:
safe_cp = safe_classpath(classpath, synthetic_jar_dir)
logger.debug('Bundling classpath {} into {}'.format(':'.join(classpath), safe_cp))
return executor.runner(safe_cp, main, args=args, jvm_options=jvm_options, cwd=cwd)
|
Gets the java runner for execute_java and execute_java_async.
|
def datetime_parsing(text, base_date=datetime.now()):
matches = []
found_array = []
for expression, function in regex:
for match in expression.finditer(text):
matches.append((match.group(), function(match, base_date), match.span()))
for match, value, spans in matches:
subn = re.subn(
'(?!<TAG[^>]*?>)' + match + '(?![^<]*?</TAG>)', '<TAG>' + match + '</TAG>', text
)
text = subn[0]
is_substituted = subn[1]
if is_substituted != 0:
found_array.append((match, value, spans))
return sorted(found_array, key=lambda match: match and match[2][0])
|
Extract datetime objects from a string of text.
|
def warsaw_up_to_warsaw(C, parameters=None, sectors=None):
C_in = smeftutil.wcxf2arrays_symmetrized(C)
p = default_parameters.copy()
if parameters is not None:
p.update(parameters)
Uu = Ud = Ul = Ue = np.eye(3)
V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
Uq = V
C_out = smeftutil.flavor_rotation(C_in, Uq, Uu, Ud, Ul, Ue)
C_out = smeftutil.arrays2wcxf_nonred(C_out)
warsaw = wcxf.Basis['SMEFT', 'Warsaw']
all_wcs = set(warsaw.all_wcs)
return {k: v for k, v in C_out.items() if k in all_wcs}
|
Translate from the 'Warsaw up' basis to the Warsaw basis.
Parameters used:
- `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined
as the mismatch between left-handed quark mass matrix diagonalization
matrices).
|
def GetValue(self, row, col):
if len(self.dataframe):
return str(self.dataframe.iloc[row, col])
return ''
|
Find the matching value in the pandas DataFrame and return it as a string.
|
def get_brokendate_fx_forward_rate(self, asset_manager_id, asset_id, price_date, value_date):
self.logger.info('Calculate broken date FX Forward - Asset Manager: %s - Asset (currency): %s - Price Date: %s - Value Date: %s', asset_manager_id, asset_id, price_date, value_date)
url = '%s/brokendateforward/%s' % (self.endpoint, asset_manager_id)
params = {'value_date': value_date, 'asset_id':asset_id, 'price_date': price_date}
response = self.session.get(url=url, params = params)
if response.ok:
forward_rate = response.json()
self.logger.info('Retrieved broken date FX forward rate %s - %s: %s', asset_id, price_date, value_date)
return forward_rate
else:
self.logger.error(response.text)
response.raise_for_status()
|
This method calculates the broken-date forward FX rate based on the passed-in parameters
|
def convert_to_feature_collection(self):
if self.data['type'] == 'FeatureCollection':
return
if not self.embed:
raise ValueError(
'Data is not a FeatureCollection, but it should be to apply '
'style or highlight. Because `embed=False` it cannot be '
'converted into one.\nEither change your geojson data to a '
'FeatureCollection, set `embed=True` or disable styling.')
if 'geometry' not in self.data.keys():
self.data = {'type': 'Feature', 'geometry': self.data}
self.data = {'type': 'FeatureCollection', 'features': [self.data]}
|
Convert data into a FeatureCollection if it is not already.
|
def split_array_like(df, columns=None):
dtypes = df.dtypes
if columns is None:
columns = df.columns
elif isinstance(columns, str):
columns = [columns]
for column in columns:
expanded = np.repeat(df.values, df[column].apply(len).values, axis=0)
expanded[:, df.columns.get_loc(column)] = np.concatenate(df[column].tolist())
df = pd.DataFrame(expanded, columns=df.columns)
for i, dtype in enumerate(dtypes):
df.iloc[:,i] = df.iloc[:,i].astype(dtype)
return df
|
Split cells with array-like values along row axis.
Column names are maintained. The index is dropped.
Parameters
----------
df : ~pandas.DataFrame
Data frame ``df[columns]`` should contain :py:class:`~pytil.numpy.ArrayLike`
values.
columns : ~typing.Collection[str] or str or None
Columns (or column) whose values to split. Defaults to ``df.columns``.
Returns
-------
~pandas.DataFrame
Data frame with array-like values in ``df[columns]`` split across rows,
and corresponding values in other columns repeated.
Examples
--------
>>> df = pd.DataFrame([[1,[1,2],[1]],[1,[1,2],[3,4,5]],[2,[1],[1,2]]], columns=('check', 'a', 'b'))
>>> df
check a b
0 1 [1, 2] [1]
1 1 [1, 2] [3, 4, 5]
2 2 [1] [1, 2]
>>> split_array_like(df, ['a', 'b'])
check a b
0 1 1 1
1 1 2 1
2 1 1 3
3 1 1 4
4 1 1 5
5 1 2 3
6 1 2 4
7 1 2 5
8 2 1 1
9 2 1 2
|
def indexed_file(self, f):
filename, handle = f
if handle is None and filename is not None:
handle = open(filename)
if (handle is None and filename is None) or \
(filename != self._indexed_filename) or \
(handle != self._indexed_file_handle):
self.index = {}
if ((handle is not None or filename is not None) and
(self.record_iterator is None or self.record_hash_function is None)):
raise IndexError("Setting index file failed; reason: iterator "
"(self.record_iterator) or hash function "
"(self.record_hash_function) have to be set first")
self._indexed_filename = filename
self._indexed_file_handle = handle
|
Setter for information about the file this object indexes.
:param f: a tuple of (filename, handle), either (or both) of which can be
None. If the handle is None, but filename is provided, then
handle is created from the filename. If both handle and filename
are None, or they don't match the previous values indexed by this
object, any current data in this index is cleared. If either are
not None, we require the iterator and the hash function for this
object to already be set.
|
def run(self):
if self.args['add']:
self.action_add()
elif self.args['rm']:
self.action_rm()
elif self.args['show']:
self.action_show()
elif self.args['rename']:
self.action_rename()
else:
self.action_run_command()
|
Perform the specified action
|
def serialize(exc):
return {
'exc_type': type(exc).__name__,
'exc_path': get_module_path(type(exc)),
'exc_args': list(map(safe_for_serialization, exc.args)),
'value': safe_for_serialization(exc),
}
|
Serialize `exc` into a data dictionary representing it.
|
def _error_catcher(self):
clean_exit = False
try:
try:
yield
except SocketTimeout:
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
if 'read operation timed out' not in str(e):
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except (HTTPException, SocketError) as e:
raise ProtocolError('Connection broken: %r' % e, e)
clean_exit = True
finally:
if not clean_exit:
if self._original_response:
self._original_response.close()
if self._connection:
self._connection.close()
if self._original_response and self._original_response.isclosed():
self.release_conn()
|
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
|
def get_level_values(self, level):
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
|
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
---------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
|
def subnet_delete(auth=None, **kwargs):
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_subnet(**kwargs)
|
Delete a subnet
name
Name or ID of the subnet to delete
CLI Example:
.. code-block:: bash
salt '*' neutronng.subnet_delete name=subnet1
salt '*' neutronng.subnet_delete \
name=1dcac318a83b4610b7a7f7ba01465548
|
def write(self, path=None):
if not self._path and not path:
raise ConfigException('no config path given')
if path:
self._path = path
if '~' in self._path:
self._path = os.path.expanduser(self._path)
f = open(self._path, 'w')
f.write(json.dumps(self._data))
f.close()
|
Write config data to disk. If this config object already has a path,
it will write to it. If it doesn't, one must be passed during this
call.
:param str path: path to config file
|
def _api_post(self, url, **kwargs):
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._post(**kwargs)
|
A convenience wrapper for _post. Adds headers, auth and base url by
default
|
def drawQuad(page, quad, color=None, fill=None, dashes=None,
width=1, roundCap=False, morph=None, overlay=True):
img = page.newShape()
Q = img.drawQuad(Quad(quad))
img.finish(color=color, fill=fill, dashes=dashes, width=width,
roundCap=roundCap, morph=morph)
img.commit(overlay)
return Q
|
Draw a quadrilateral.
|
def get_hosting_devices_for_agent(self, context, host):
agent_ids = self._dmplugin.get_cfg_agents(context, active=None,
filters={'host': [host]})
if agent_ids:
return [self._dmplugin.get_device_info_for_agent(context, hd_db)
for hd_db in self._dmplugin.get_hosting_devices_db(
context, filters={'cfg_agent_id': [agent_ids[0].id]})]
return []
|
Fetches hosting devices that a Cisco cfg agent is managing.
This function is supposed to be called when the agent has started,
is ready to take on assignments and before any callbacks to fetch
logical resources are issued.
:param context: contains user information
:param host: originator of callback
:returns: list of hosting devices managed by the cfg agent
|
def parallel_for(loop_function, parameters, nb_threads=100):
import multiprocessing.pool
from contextlib import closing
with closing(multiprocessing.pool.ThreadPool(nb_threads)) as pool:
return pool.map(loop_function, parameters)
|
Execute the loop body in parallel.
.. note:: Race-Conditions
Executing code in parallel can cause an error class called
"race-condition".
Parameters
----------
loop_function : Python function which takes a tuple as input
parameters : List of tuples
Each element here should be executed in parallel.
Returns
-------
return_values : list of return values
|
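A short usage sketch for `parallel_for`; the worker function here is hypothetical and only illustrates the calling convention (one tuple per call):

```python
def add_pair(pair):
    # Each parameter is a tuple; unpack it inside the loop body.
    a, b = pair
    return a + b

results = parallel_for(add_pair, [(1, 2), (3, 4), (5, 6)], nb_threads=3)
print(results)  # -> [3, 7, 11]
```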
def deploy(self, initial_instance_count, instance_type, accelerator_type=None, endpoint_name=None,
use_compiled_model=False, update_endpoint=False, **kwargs):
self._ensure_latest_training_job()
endpoint_name = endpoint_name or self.latest_training_job.name
self.deploy_instance_type = instance_type
if use_compiled_model:
family = '_'.join(instance_type.split('.')[:-1])
if family not in self._compiled_models:
raise ValueError("No compiled model for {}. "
"Please compile one with compile_model before deploying.".format(family))
model = self._compiled_models[family]
else:
model = self.create_model(**kwargs)
return model.deploy(
instance_type=instance_type,
initial_instance_count=initial_instance_count,
accelerator_type=accelerator_type,
endpoint_name=endpoint_name,
update_endpoint=update_endpoint,
tags=self.tags)
|
Deploy the trained model to an Amazon SageMaker endpoint and return a ``sagemaker.RealTimePredictor`` object.
More information:
http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
Args:
initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction.
instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction,
for example, 'ml.c4.xlarge'.
accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading
and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator
will be attached to the endpoint.
For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified, the name of
the training job is used.
use_compiled_model (bool): Flag to select whether to use compiled (optimized) model. Default: False.
update_endpoint (bool): Flag to update the model in an existing Amazon SageMaker endpoint.
If True, this will deploy a new EndpointConfig to an already existing endpoint and delete resources
corresponding to the previous EndpointConfig. Default: False
tags(List[dict[str, str]]): Optional. The list of tags to attach to this specific endpoint. Example:
>>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]
For more information about tags, see https://boto3.amazonaws.com/v1/documentation\
/api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method,
which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences.
|
def list_of_objects_from_api(url):
response = requests.get(url)
content = json.loads(response.content)
count = content["meta"]["total_count"]
if count <= 20:
return content["items"]
else:
items = [] + content["items"]
num_requests = int(math.ceil(count // 20))
for i in range(1, num_requests + 1):
paginated_url = "{}?limit=20&offset={}".format(
url, str(i * 20))
paginated_response = requests.get(paginated_url)
items = items + json.loads(paginated_response.content)["items"]
return items
|
The API only serves 20 items per request by default.
This fetches info on all of the items and returns them as a list.
Assumption: the API page limit is not less than 20
|
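A usage sketch against a hypothetical endpoint (the URL is illustrative only), assuming the API returns the `{"meta": {"total_count": ...}, "items": [...]}` shape the function expects:

```python
# Any API exposing the expected response shape and limit/offset parameters works here.
items = list_of_objects_from_api("https://example.com/api/v2/pages/")
print(len(items), "items fetched across all pages")
```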
def _set_nil(self, element, value_parser):
if self.value:
element.text = value_parser(self.value)
else:
element.attrib['nil'] = 'true'
return element
|
Method to set an attribute of the element.
If the value of the field is None then set the nil='true' attribute in the element
:param element: the element which needs to be modified
:type element: xml.etree.ElementTree.Element
:param value_parser: the function applied to ``self.value`` before it is set as the element text
:type value_parser: def
:return: the element with or without the specific attribute
:rtype: xml.etree.ElementTree.Element
|
def get_dataset(self, name, multi_instance=0):
return [elem for elem in self._data_list
if elem.name == name and elem.multi_id == multi_instance][0]
|
get a specific dataset.
example:
try:
gyro_data = ulog.get_dataset('sensor_gyro')
except (KeyError, IndexError, ValueError) as error:
print(type(error), "(sensor_gyro):", error)
:param name: name of the dataset
:param multi_instance: the multi_id, defaults to the first
:raises KeyError, IndexError, ValueError: if name or instance not found
|
def load_to_array(self, keys):
data = np.empty((len(self.data[keys[0]]), len(keys)))
for i in range(0, len(self.data[keys[0]])):
for j, key in enumerate(keys):
data[i, j] = self.data[key][i]
return data
|
This loads the data contained in the catalogue into a numpy array. The
method works only for float data
:param keys:
A list of keys to be uploaded into the array
:type list:
|
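A small sketch of calling `load_to_array` as a plain function on a minimal stand-in catalogue object (the `Catalogue` class here is only illustrative):

```python
class Catalogue:
    # Minimal stand-in exposing the `data` attribute the method reads.
    def __init__(self, data):
        self.data = data

cat = Catalogue({'mag': [5.0, 6.1, 4.3], 'depth': [10.0, 33.0, 7.5]})
print(load_to_array(cat, ['mag', 'depth']))
# -> a (3, 2) array with one row per event and one column per key
```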
def get_codon(seq, codon_no, start_offset):
seq = seq.replace("-","")
codon_start_pos = int(codon_no - 1)*3 - start_offset
codon = seq[codon_start_pos:codon_start_pos + 3]
return codon
|
This function takes a sequence and a codon number and returns the codon
found in the sequence at that position
|
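A worked example for `get_codon` with a zero start offset; codon numbering is 1-based and gap characters are removed before slicing:

```python
seq = "ATGGCCTTA-AAA"              # the '-' gap is stripped internally
print(get_codon(seq, 1, 0))        # -> 'ATG'
print(get_codon(seq, 2, 0))        # -> 'GCC'
print(get_codon(seq, 4, 0))        # -> 'AAA' (counted on the gap-free sequence)
```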
def _to_dsn(hosts):
p = urlparse(hosts)
try:
user_and_pw, netloc = p.netloc.split('@', maxsplit=1)
except ValueError:
netloc = p.netloc
user_and_pw = 'crate'
try:
host, port = netloc.split(':', maxsplit=1)
except ValueError:
host = netloc
port = 5432
dbname = p.path[1:] if p.path else 'doc'
dsn = f'postgres://{user_and_pw}@{host}:{port}/{dbname}'
if p.query:
dsn += '?' + '&'.join(k + '=' + v[0] for k, v in parse_qs(p.query).items())
return dsn
|
Convert a host URI into a dsn for aiopg.
>>> _to_dsn('aiopg://myhostname:4242/mydb')
'postgres://crate@myhostname:4242/mydb'
>>> _to_dsn('aiopg://myhostname:4242')
'postgres://crate@myhostname:4242/doc'
>>> _to_dsn('aiopg://hoschi:pw@myhostname:4242/doc?sslmode=require')
'postgres://hoschi:pw@myhostname:4242/doc?sslmode=require'
>>> _to_dsn('aiopg://myhostname')
'postgres://crate@myhostname:5432/doc'
|
def load_pyobj(name, pyobj):
'Return Sheet object of appropriate type for given sources in `args`.'
if isinstance(pyobj, list) or isinstance(pyobj, tuple):
if getattr(pyobj, '_fields', None):
return SheetNamedTuple(name, pyobj)
else:
return SheetList(name, pyobj)
elif isinstance(pyobj, dict):
return SheetDict(name, pyobj)
elif isinstance(pyobj, object):
return SheetObject(name, pyobj)
else:
error("cannot load '%s' as pyobj" % type(pyobj).__name__)
|
Return Sheet object of appropriate type for given sources in `args`.
|
def check_column(state, name, missing_msg=None, expand_msg=None):
if missing_msg is None:
missing_msg = "We expected to find a column named `{{name}}` in the result of your query, but couldn't."
if expand_msg is None:
expand_msg = "Have another look at your query result. "
msg_kwargs = {"name": name}
has_result(state)
stu_res = state.student_result
sol_res = state.solution_result
if name not in sol_res:
raise BaseException("name %s not in solution column names" % name)
if name not in stu_res:
_msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs)
state.do_test(_msg)
return state.to_child(
append_message={"msg": expand_msg, "kwargs": msg_kwargs},
student_result={name: stu_res[name]},
solution_result={name: sol_res[name]},
)
|
Zoom in on a particular column in the query result, by name.
After zooming in on a column, which is represented as a single-column query result,
you can use ``has_equal_value()`` to verify whether the column in the solution query result
matches the column in student query result.
Args:
name: name of the column to zoom in on.
missing_msg: if specified, this overrides the automatically generated feedback
message in case the column is missing in the student query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists``
* student : ``SELECT artist_id, name FROM artists``
We can write the following SCTs: ::
# fails, since no column named id in student result
Ex().check_column('id')
# passes, since a column named name is in student_result
Ex().check_column('name')
|
def style(self):
LOGGER.info('ANALYSIS : Styling')
classes = generate_classified_legend(
self.analysis_impacted,
self.exposure,
self.hazard,
self.use_rounding,
self.debug_mode)
hazard_class = hazard_class_field['key']
for layer in self._outputs():
without_geometries = [
QgsWkbTypes.NullGeometry,
QgsWkbTypes.UnknownGeometry]
if layer.geometryType() not in without_geometries:
display_not_exposed = False
if layer == self.impact or self.debug_mode:
display_not_exposed = True
if layer.keywords['inasafe_fields'].get(hazard_class):
hazard_class_style(layer, classes, display_not_exposed)
simple_polygon_without_brush(
self.aggregation_summary, aggregation_width, aggregation_color)
simple_polygon_without_brush(
self.analysis_impacted, analysis_width, analysis_color)
for layer in self._outputs():
layer.saveDefaultStyle()
|
Function to apply some styles to the layers.
|
def _repr_mimebundle_(self, *args, **kwargs):
chart = self.to_chart()
dct = chart.to_dict()
return alt.renderers.get()(dct)
|
Return a MIME bundle for display in Jupyter frontends.
|
def connect_database(url):
db = _connect_database(url)
db.copy = lambda: _connect_database(url)
return db
|
create database object by url
mysql:
mysql+type://user:passwd@host:port/database
sqlite:
# relative path
sqlite+type:///path/to/database.db
# absolute path
sqlite+type:////path/to/database.db
# memory database
sqlite+type://
mongodb:
mongodb+type://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
more: http://docs.mongodb.org/manual/reference/connection-string/
sqlalchemy:
sqlalchemy+postgresql+type://user:passwd@host:port/database
sqlalchemy+mysql+mysqlconnector+type://user:passwd@host:port/database
more: http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html
redis:
redis+taskdb://host:port/db
elasticsearch:
elasticsearch+type://host:port/?index=pyspider
local:
local+projectdb://filepath,filepath
type:
taskdb
projectdb
resultdb
|
def cli(env, identifier):
mgr = SoftLayer.NetworkManager(env.client)
subnet_id = helpers.resolve_id(mgr.resolve_subnet_ids, identifier,
name='subnet')
if not (env.skip_confirmations or formatting.no_going_back(subnet_id)):
raise exceptions.CLIAbort('Aborted')
mgr.cancel_subnet(subnet_id)
|
Cancel a subnet.
|
def calculate_localised_cost(self, d1, d2, neighbours, motions):
my_nbrs_with_motion = [n for n in neighbours[d1] if n in motions]
my_motion = (d1.center[0] - d2.center[0], d1.center[1] - d2.center[1])
if my_nbrs_with_motion == []:
distance = euclidean_dist(d1.center, d2.center) / self.scale
else:
distance = min([euclidean_dist(my_motion, motions[n]) for n in my_nbrs_with_motion]) / self.scale
area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
return distance + self.parameters_cost_iteration["area_weight"] * area_change
|
Calculates assignment cost between two cells taking into account the movement of the cell's neighbours.
:param CellFeatures d1: detection in first frame
:param CellFeatures d2: detection in second frame
|
def visit_delete(self, node):
return "del %s" % ", ".join(child.accept(self) for child in node.targets)
|
return an astroid.Delete node as string
|
def remove_image(self, image_id, force=False, noprune=False):
logger.info("removing image '%s' from filesystem", image_id)
logger.debug("image_id = '%s'", image_id)
if isinstance(image_id, ImageName):
image_id = image_id.to_str()
self.d.remove_image(image_id, force=force, noprune=noprune)
|
remove provided image from filesystem
:param image_id: str or ImageName
:param noprune: bool, keep untagged parents?
:param force: bool, force remove -- just trash it no matter what
:return: None
|
def _gmtime(timestamp):
try:
return time.gmtime(timestamp)
except OSError:
dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
dst = int(_isdst(dt))
return time.struct_time(dt.timetuple()[:8] + tuple([dst]))
|
Custom gmtime that falls back to datetime arithmetic for timestamps the platform's ``time.gmtime`` cannot handle.
|
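A usage sketch; `_isdst` is a helper from the same module, and the negative timestamp is the kind of input that makes the platform's `time.gmtime` raise on some systems (notably Windows), triggering the fallback branch:

```python
print(_gmtime(0).tm_year)       # -> 1970, via the normal time.gmtime path
print(_gmtime(-86400).tm_mday)  # -> 31 (31 Dec 1969), via the fallback where needed
```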
def _generate_union_properties(self, fields):
for field in fields:
if not is_void_type(field.data_type):
doc = self.process_doc(
field.doc, self._docf) if field.doc else undocumented
warning_str = (
' @note Ensure the `is{}` method returns true before accessing, '
'otherwise a runtime exception will be raised.')
doc += warning_str.format(fmt_camel_upper(field.name))
self.emit_wrapped_text(
self.process_doc(doc, self._docf), prefix=comment_prefix)
self.emit(fmt_property(field=field))
self.emit()
|
Emits union instance properties from the given fields.
|
def _read_audio_data(self, file_path):
try:
self.log(u"Reading audio data...")
audio_file = AudioFile(
file_path=file_path,
file_format=self.OUTPUT_AUDIO_FORMAT,
rconf=self.rconf,
logger=self.logger
)
audio_file.read_samples_from_file()
self.log([u"Duration of '%s': %f", file_path, audio_file.audio_length])
self.log(u"Reading audio data... done")
return (True, (
audio_file.audio_length,
audio_file.audio_sample_rate,
audio_file.audio_format,
audio_file.audio_samples
))
except (AudioFileUnsupportedFormatError, OSError) as exc:
self.log_exc(u"An unexpected error occurred while reading audio data", exc, True, None)
return (False, None)
|
Read audio data from file.
:rtype: tuple (True, (duration, sample_rate, codec, data)) or (False, None) on exception
|
def get_instance_attribute(self, instance_id, attribute):
params = {'InstanceId' : instance_id}
if attribute:
params['Attribute'] = attribute
return self.get_object('DescribeInstanceAttribute', params,
InstanceAttribute, verb='POST')
|
Gets an attribute from an instance.
:type instance_id: string
:param instance_id: The Amazon id of the instance
:type attribute: string
:param attribute: The attribute you need information about
Valid choices are:
* instanceType|kernel|ramdisk|userData|
* disableApiTermination|
* instanceInitiatedShutdownBehavior|
* rootDeviceName|blockDeviceMapping
:rtype: :class:`boto.ec2.image.InstanceAttribute`
:return: An InstanceAttribute object representing the value of the
attribute requested
|
def get_context_data(self, **kwargs):
context = super(CrossTypeAnimalList, self).get_context_data(**kwargs)
context['list_type'] = self.kwargs['breeding_type']
return context
|
This adds ``list_type`` to the context, set to whatever the crosstype was.
|
def close_monomers(self, group, cutoff=4.0):
nearby_residues = []
for self_atom in self.atoms.values():
nearby_atoms = group.is_within(cutoff, self_atom)
for res_atom in nearby_atoms:
if res_atom.parent not in nearby_residues:
nearby_residues.append(res_atom.parent)
return nearby_residues
|
Returns a list of Monomers from within a cut off distance of the Monomer
Parameters
----------
group: BaseAmpal or Subclass
Group to be search for Monomers that are close to this Monomer.
cutoff: float
Distance cut off.
Returns
-------
nearby_residues: [Monomers]
List of Monomers within cut off distance.
|
def ppca(Y, Q, iterations=100):
from numpy.ma import dot as madot
N, D = Y.shape
W = np.random.randn(D, Q) * 1e-3
Y = np.ma.masked_invalid(Y, copy=0)
mu = Y.mean(0)
Ycentered = Y - mu
try:
for _ in range(iterations):
exp_x = np.asarray_chkfinite(np.linalg.solve(W.T.dot(W), madot(W.T, Ycentered.T))).T
W = np.asarray_chkfinite(np.linalg.solve(exp_x.T.dot(exp_x), madot(exp_x.T, Ycentered))).T
except np.linalg.linalg.LinAlgError:
pass
return np.asarray_chkfinite(exp_x), np.asarray_chkfinite(W)
|
EM implementation for probabilistic pca.
:param array-like Y: Observed Data
:param int Q: Dimensionality for reduced array
:param int iterations: number of iterations for EM
|
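A quick sketch exercising `ppca` on synthetic data with a few missing entries (NaNs are masked internally before the EM updates):

```python
import numpy as np

rng = np.random.RandomState(0)
Y = rng.randn(100, 5)      # 100 observations in 5 dimensions
Y[::10, 2] = np.nan        # sprinkle in some missing values

X, W = ppca(Y, Q=2, iterations=50)
print(X.shape, W.shape)    # -> (100, 2) (5, 2)
```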
def prepare_axes(axes, title, size, cmap=None):
if axes is None:
return None
axes.set_xlim([0, size[1]])
axes.set_ylim([size[0], 0])
axes.set_aspect('equal')
axes.axis('off')
if isinstance(cmap, str):
title = '{} (cmap: {})'.format(title, cmap)
axes.set_title(title)
axes_image = image.AxesImage(axes, cmap=cmap,
extent=(0, size[1], size[0], 0))
axes_image.set_data(np.random.random((size[0], size[1], 3)))
axes.add_image(axes_image)
return axes_image
|
Prepares an axes object for clean plotting.
Removes x and y axes labels and ticks, sets the aspect ratio to be
equal, uses the size to determine the drawing area and fills the image
with random colors as visual feedback.
Creates an AxesImage to be shown inside the axes object and sets the
needed properties.
Args:
axes: The axes object to modify.
title: The title.
size: The size of the expected image.
cmap: The colormap if a custom color map is needed.
(Default: None)
Returns:
The AxesImage's handle.
|
def servers(self):
url = "%s/servers" % self.root
return Servers(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
|
gets the federated or registered servers for Portal
|
def pelix_bundles(self):
framework = self.__context.get_framework()
return {
bundle.get_bundle_id(): {
"name": bundle.get_symbolic_name(),
"version": bundle.get_version(),
"state": bundle.get_state(),
"location": bundle.get_location(),
}
for bundle in framework.get_bundles()
}
|
List of installed bundles
|
def _correct_qualimap_genome_results(samples):
for s in samples:
if verify_file(s.qualimap_genome_results_fpath):
correction_is_needed = False
with open(s.qualimap_genome_results_fpath, 'r') as f:
content = f.readlines()
metrics_started = False
for line in content:
if ">> Reference" in line:
metrics_started = True
if metrics_started:
if line.find(',') != -1:
correction_is_needed = True
break
if correction_is_needed:
with open(s.qualimap_genome_results_fpath, 'w') as f:
metrics_started = False
for line in content:
if ">> Reference" in line:
metrics_started = True
if metrics_started:
if line.find(',') != -1:
line = line.replace(',', '')
f.write(line)
|
fixing java.lang.Double.parseDouble error on entries like "6,082.49"
|
def pair_looper(iterator):
left = START
for item in iterator:
if left is not START:
yield (left, item)
left = item
|
Loop through iterator yielding items in adjacent pairs
|
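A quick usage example, assuming the module-level `START` sentinel the generator relies on is defined:

```python
print(list(pair_looper([1, 2, 3, 4])))  # -> [(1, 2), (2, 3), (3, 4)]
print(list(pair_looper([42])))          # -> [] (fewer than two items yields nothing)
```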
def top(self):
for child in self.children(skip_not_present=False):
if not isinstance(child, AddrmapNode):
continue
return child
raise RuntimeError
|
Returns the top-level addrmap node
|
def get_batched(portal_type=None, uid=None, endpoint=None, **kw):
results = get_search_results(portal_type=portal_type, uid=uid, **kw)
size = req.get_batch_size()
start = req.get_batch_start()
complete = req.get_complete(default=_marker)
if complete is _marker:
complete = uid and True or False
return get_batch(results, size, start, endpoint=endpoint,
complete=complete)
|
Get batched results
|
def _load_scratch_orgs(self):
current_orgs = self.list_orgs()
if not self.project_config.orgs__scratch:
return
for config_name in self.project_config.orgs__scratch.keys():
if config_name in current_orgs:
continue
self.create_scratch_org(config_name, config_name)
|
Creates all scratch org configs for the project in the keychain if
a keychain org doesn't already exist
|
def lastId(self) -> BaseReference:
if self.childIds is not None:
if len(self.childIds) > 0:
return self.childIds[-1]
return None
else:
raise NotImplementedError
|
Last child's id of current TextualNode
|
def _create_dict_with_nested_keys_and_val(cls, keys, value):
if len(keys) > 1:
new_keys = keys[:-1]
new_val = {keys[-1]: value}
return cls._create_dict_with_nested_keys_and_val(new_keys, new_val)
elif len(keys) == 1:
return {keys[0]: value}
else:
raise ValueError('Keys must contain at least one key.')
|
Recursively constructs a nested dictionary with the keys pointing to the value.
For example:
Given the list of keys ['a', 'b', 'c', 'd'] and a primitive
value 'hello world', the method will produce the nested dictionary
{'a': {'b': {'c': {'d': 'hello world'}}}}. The number of keys in the list
defines the depth of the nested dict. If the list of keys is ['a'] and
the value is 'hello world', then the result would be {'a': 'hello world'}.
:param list of string keys: A list of keys to be nested as a dictionary.
:param primitive value: The value of the information being stored.
:return: dict of nested keys leading to the value.
|
def convolve_stack(data, kernel, rot_kernel=False, method='scipy'):
if rot_kernel:
kernel = rotate_stack(kernel)
return np.array([convolve(data_i, kernel_i, method=method) for data_i,
kernel_i in zip(data, kernel)])
|
r"""Convolve stack of data with stack of kernels
This method convolves the input data with a given kernel using FFT and
is the default convolution used for all routines
Parameters
----------
data : np.ndarray
Input data array, normally a 2D image
kernel : np.ndarray
Input kernel array, normally a 2D kernel
rot_kernel : bool
Option to rotate kernels by 180 degrees
method : str {'astropy', 'scipy'}, optional
Convolution method (default is 'scipy')
Returns
-------
np.ndarray convolved data
Examples
--------
>>> from math.convolve import convolve
>>> import numpy as np
>>> a = np.arange(18).reshape(2, 3, 3)
>>> b = a + 10
>>> convolve_stack(a, b)
array([[[ 534., 525., 534.],
[ 453., 444., 453.],
[ 534., 525., 534.]],
<BLANKLINE>
[[ 2721., 2712., 2721.],
[ 2640., 2631., 2640.],
[ 2721., 2712., 2721.]]])
>>> convolve_stack(a, b, rot_kernel=True)
array([[[ 474., 483., 474.],
[ 555., 564., 555.],
[ 474., 483., 474.]],
<BLANKLINE>
[[ 2661., 2670., 2661.],
[ 2742., 2751., 2742.],
[ 2661., 2670., 2661.]]])
See Also
--------
convolve : The convolution function called by convolve_stack
|
def do(self):
'Do or redo the action'
self._runner = self._generator(*self.args, **self.kwargs)
rets = next(self._runner)
if isinstance(rets, tuple):
self._text = rets[0]
return rets[1:]
elif rets is None:
self._text = ''
return None
else:
self._text = rets
return None
|
Do or redo the action
|
def hessian(self, theta_x, theta_y, kwargs_lens, k=None, diff=0.00000001):
alpha_ra, alpha_dec = self.alpha(theta_x, theta_y, kwargs_lens)
alpha_ra_dx, alpha_dec_dx = self.alpha(theta_x + diff, theta_y, kwargs_lens)
alpha_ra_dy, alpha_dec_dy = self.alpha(theta_x, theta_y + diff, kwargs_lens)
dalpha_rara = (alpha_ra_dx - alpha_ra)/diff
dalpha_radec = (alpha_ra_dy - alpha_ra)/diff
dalpha_decra = (alpha_dec_dx - alpha_dec)/diff
dalpha_decdec = (alpha_dec_dy - alpha_dec)/diff
f_xx = dalpha_rara
f_yy = dalpha_decdec
f_xy = dalpha_radec
f_yx = dalpha_decra
return f_xx, f_xy, f_yx, f_yy
|
computes the hessian components f_xx, f_yy, f_xy from f_x and f_y with numerical differentiation
:param theta_x: x-position (preferentially arcsec)
:type theta_x: numpy array
:param theta_y: y-position (preferentially arcsec)
:type theta_y: numpy array
:param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes
:param diff: numerical differential step (float)
:return: f_xx, f_xy, f_yx, f_yy
|
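The same forward-difference scheme, sketched for a plain scalar potential so the expected Hessian can be checked by hand; with f(x, y) = x^2 * y the exact values are f_xx = 2y, f_xy = f_yx = 2x, f_yy = 0:

```python
def numerical_hessian(f_x, f_y, x, y, diff=1e-7):
    # Forward differences of the first derivatives approximate the second derivatives.
    f_xx = (f_x(x + diff, y) - f_x(x, y)) / diff
    f_xy = (f_x(x, y + diff) - f_x(x, y)) / diff
    f_yx = (f_y(x + diff, y) - f_y(x, y)) / diff
    f_yy = (f_y(x, y + diff) - f_y(x, y)) / diff
    return f_xx, f_xy, f_yx, f_yy

fx = lambda x, y: 2 * x * y   # analytic df/dx of f(x, y) = x**2 * y
fy = lambda x, y: x ** 2      # analytic df/dy
print(numerical_hessian(fx, fy, 1.0, 2.0))  # ~ (4.0, 2.0, 2.0, 0.0)
```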
def retweet(self, id):
try:
self._client.retweet(id=id)
return True
except TweepError as e:
if e.api_code == TWITTER_PAGE_DOES_NOT_EXISTS_ERROR:
return False
raise
|
Retweet a tweet.
:param id: ID of the tweet in question
:return: True if success, False otherwise
|
def update_lbaas_healthmonitor(self, lbaas_healthmonitor, body=None):
return self.put(self.lbaas_healthmonitor_path % (lbaas_healthmonitor),
body=body)
|
Updates a lbaas_healthmonitor.
|
def reverse_segment(path, n1, n2):
q = path.copy()
if n2 > n1:
q[n1:(n2+1)] = path[n1:(n2+1)][::-1]
return q
else:
seg = np.hstack((path[n1:], path[:(n2+1)]))[::-1]
brk = len(q) - n1
q[n1:] = seg[:brk]
q[:(n2+1)] = seg[brk:]
return q
|
Reverse the nodes between n1 and n2.
|
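A worked example for `reverse_segment`; the second call exercises the wrap-around branch where the reversed segment crosses the end of the array (a typical 2-opt move on a closed tour):

```python
import numpy as np

path = np.array([0, 1, 2, 3, 4, 5])
print(reverse_segment(path, 1, 3))  # -> [0 3 2 1 4 5]
print(reverse_segment(path, 4, 1))  # -> [5 4 2 3 1 0] (segment wraps past the end)
```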
def update_multi_precision(self, index, weight, grad, state):
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = state[0]
original_state = state[1]
grad32 = grad.astype(numpy.float32)
self.update(index, weight_master_copy, grad32, original_state)
cast(weight_master_copy, dtype=weight.dtype, out=weight)
else:
self.update(index, weight, grad, state)
|
Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
|
def import_object(name: str) -> Any:
if name.count(".") == 0:
return __import__(name)
parts = name.split(".")
obj = __import__(".".join(parts[:-1]), fromlist=[parts[-1]])
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
|
Imports an object by name.
``import_object('x')`` is equivalent to ``import x``.
``import_object('x.y.z')`` is equivalent to ``from x.y import z``.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
|
def _get_button_label(self):
dlg = wx.TextEntryDialog(self, _('Button label:'))
if dlg.ShowModal() == wx.ID_OK:
label = dlg.GetValue()
else:
label = ""
dlg.Destroy()
return label
|
Gets Button label from user and returns string
|
def _get_method_kwargs(self):
method_kwargs = {
'user': self.user,
'content_type': self.ctype,
'object_id': self.content_object.pk,
}
return method_kwargs
|
Helper method. Returns kwargs needed to filter the correct object.
Can also be used to create the correct object.
|
def get_project_export(self, project_id):
try:
result = self._request('/getprojectexport/',
{'projectid': project_id})
return TildaProject(**result)
except NetworkError:
return []
|
Get project info for export
|
def streaming_client(self, tasks_regex, tasks_negate, workers_regex, workers_negate):
cc = CapturingClient(Queue(),
re.compile(tasks_regex), tasks_negate,
re.compile(workers_regex), workers_negate)
self.observers.append(cc)
yield cc.queue
self.observers.remove(cc)
|
Connects a client to the streaming capture, filtering the events that are sent
to it.
Args:
tasks_regex (str): a pattern to filter tasks to capture.
ex.: '^dispatch|^email' to filter names starting with that
or 'dispatch.*123456' to filter that exact name and number
or even '123456' to filter that exact number anywhere.
tasks_negate (bool): if True, finds tasks that do not match criteria
workers_regex (str): a pattern to filter workers to capture.
ex.: 'service|priority' to filter names containing that
workers_negate (bool): if True, finds workers that do not match criteria
|
def _encode_query(query):
if query == '':
return query
query_args = []
for query_kv in query.split('&'):
k, v = query_kv.split('=')
query_args.append(k + "=" + quote(v.encode('utf-8')))
return '&'.join(query_args)
|
Quote all values of a query string.
|
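A short example; only the values are percent-encoded, keys pass through untouched:

```python
print(_encode_query('name=hello world&tag=a/b'))
# -> 'name=hello%20world&tag=a/b'   ('/' is left alone by quote's default safe set)
print(_encode_query(''))
# -> ''
```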
def get_object_handle(self, obj):
if obj not in self._object_handles:
self._object_handles[obj] = self._get_object_handle(obj=obj)
return self._object_handles[obj]
|
Gets the vrep object handle.
|
def install(self, host):
print("Installing..")
if self._state["installed"]:
return
if self.is_headless():
log.info("Headless host")
return
print("aboutToQuit..")
self.app.aboutToQuit.connect(self._on_application_quit)
if host == "Maya":
print("Maya host..")
window = {
widget.objectName(): widget
for widget in self.app.topLevelWidgets()
}["MayaWindow"]
else:
window = self.find_window()
print("event filter..")
event_filter = self.EventFilter(window)
window.installEventFilter(event_filter)
for signal in SIGNALS_TO_REMOVE_EVENT_FILTER:
pyblish.api.register_callback(signal, self.uninstall)
log.info("Installed event filter")
self.window = window
self._state["installed"] = True
self._state["eventFilter"] = event_filter
|
Setup common to all Qt-based hosts
|
def get_by_symbol(self, symbol: str) -> Commodity:
assert isinstance(symbol, str)
query = (
self.currencies_query
.filter(Commodity.mnemonic == symbol)
)
return query.one()
|
Loads currency by symbol
|
def poll(self, timeout=None):
p = select.poll()
p.register(self._fd, select.POLLIN | select.POLLPRI)
events = p.poll(int(timeout * 1000) if timeout is not None else None)
if len(events) > 0:
return True
return False
|
Poll for data available for reading from the serial port.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Default is
a blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if data is available for reading from the serial port, ``False`` if not.
|
def tokenize(code):
tok_regex = '|'.join('(?P<{}>{})'.format(*pair) for pair in _tokens)
tok_regex = re.compile(tok_regex, re.IGNORECASE|re.M)
line_num = 1
line_start = 0
for mo in re.finditer(tok_regex, code):
kind = mo.lastgroup
value = mo.group(kind)
if kind == 'NEWLINE':
line_start = mo.end()
line_num += 1
elif kind == 'SKIP' or value=='':
pass
else:
column = mo.start() - line_start
yield Token(kind, value, line_num, column)
|
Tokenize the string `code`
|
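`tokenize` relies on a module-level `_tokens` table of `(name, pattern)` pairs and a `Token` type; a hypothetical table and a call might look like this (both the table and the `Token` namedtuple are assumptions for illustration):

```python
import collections

# Hypothetical token table and Token type matching what tokenize() expects.
Token = collections.namedtuple('Token', 'kind value line column')
_tokens = [
    ('NUMBER',  r'\d+'),
    ('IDENT',   r'[A-Za-z_]\w*'),
    ('OP',      r'[+\-*/=]'),
    ('NEWLINE', r'\n'),
    ('SKIP',    r'[ \t]+'),
]

for tok in tokenize("x = 40 + 2\ny = x"):
    print(tok)
# e.g. Token(kind='IDENT', value='x', line=1, column=0), ...
```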
def lock_area(self, code, index):
logger.debug("locking area code %s index %s" % (code, index))
return self.library.Srv_LockArea(self.pointer, code, index)
|
Locks a shared memory area.
|
def install_reqs(venv, repo_dest):
with dir_path(repo_dest):
args = ['-r', 'requirements/compiled.txt']
if not verbose:
args.insert(0, '-q')
subprocess.check_call([os.path.join(venv, 'bin', 'pip'), 'install'] +
args)
|
Installs all compiled requirements that can't be shipped in vendor.
|
def add_prop_descriptor_to_class(self, class_name, new_class_attrs, names_with_refs, container_names, dataspecs):
from .bases import ContainerProperty
from .dataspec import DataSpec
name = self.name
if name in new_class_attrs:
raise RuntimeError("Two property generators both created %s.%s" % (class_name, name))
new_class_attrs[name] = self
if self.has_ref:
names_with_refs.add(name)
if isinstance(self, BasicPropertyDescriptor):
if isinstance(self.property, ContainerProperty):
container_names.add(name)
if isinstance(self.property, DataSpec):
dataspecs[name] = self
|
``MetaHasProps`` calls this during class creation as it iterates
over properties to add, to update its registry of new properties.
The parameters passed in are mutable and this function is expected to
update them accordingly.
Args:
class_name (str) :
name of the class this descriptor is added to
new_class_attrs(dict[str, PropertyDescriptor]) :
mapping of attribute names to PropertyDescriptor that this
function will update
names_with_refs (set[str]) :
set of all property names for properties that also have
references, that this function will update
container_names (set[str]) :
set of all property names for properties that are
container props, that this function will update
dataspecs(dict[str, PropertyDescriptor]) :
mapping of attribute names to PropertyDescriptor for DataSpec
properties that this function will update
Return:
None
|
def error_router(self, original_handler, e):
if self._has_fr_route():
try:
return self.handle_error(e)
except Exception:
pass
return original_handler(e)
|
This function decides whether the error occurred in a flask-restful
endpoint or not. If it happened in a flask-restful endpoint, our
handler will be dispatched. If it happened in an unrelated view, the
app's original error handler will be dispatched.
In the event that the error occurred in a flask-restful endpoint but
the local handler can't resolve the situation, the router will fall
back onto the original_handler as last resort.
:param original_handler: the original Flask error handler for the app
:type original_handler: function
:param e: the exception raised while handling the request
:type e: Exception
|
def crl_distribution_points(self):
if self._crl_distribution_points is None:
self._crl_distribution_points = self._get_http_crl_distribution_points(self.crl_distribution_points_value)
return self._crl_distribution_points
|
Returns complete CRL URLs - does not include delta CRLs
:return:
A list of zero or more DistributionPoint objects
|
def _init_params_default(self):
Yimp = self.Y.copy()
Inan = sp.isnan(Yimp)
Yimp[Inan] = Yimp[~Inan].mean()
if self.P==1: C = sp.array([[Yimp.var()]])
else: C = sp.cov(Yimp.T)
C /= float(self.n_randEffs)
for ti in range(self.n_randEffs):
self.getTraitCovarFun(ti).setCovariance(C)
|
Internal method for default parameter initialization
|
def delete_refund(self, refund_id):
request = self._delete('transactions/refunds/' + str(refund_id))
return self.responder(request)
|
Deletes an existing refund transaction.
|
def check_initializers(initializers, keys):
if initializers is None:
return {}
_assert_is_dictlike(initializers, valid_keys=keys)
keys = set(keys)
if not set(initializers) <= keys:
extra_keys = set(initializers) - keys
raise KeyError(
"Invalid initializer keys {}, initializers can only "
"be provided for {}".format(
", ".join("'{}'".format(key) for key in extra_keys),
", ".join("'{}'".format(key) for key in keys)))
_check_nested_callables(initializers, "Initializer")
return dict(initializers)
|
Checks the given initializers.
This checks that `initializers` is a dictionary that only contains keys in
`keys`, and furthermore the entries in `initializers` are functions or
further dictionaries (the latter used, for example, in passing initializers
to modules inside modules) that must satisfy the same constraints.
Args:
initializers: Dictionary of initializers (allowing nested dictionaries) or
None.
keys: Iterable of valid keys for `initializers`.
Returns:
Copy of checked dictionary of initializers. If `initializers=None`, an empty
dictionary will be returned.
Raises:
KeyError: If an initializer is provided for a key not in `keys`.
TypeError: If a provided initializer is not a callable function, or
`initializers` is not a Mapping.
|
def visit_compare(self, node, parent):
newnode = nodes.Compare(node.lineno, node.col_offset, parent)
newnode.postinit(
self.visit(node.left, newnode),
[
(self._cmp_op_classes[op.__class__], self.visit(expr, newnode))
for (op, expr) in zip(node.ops, node.comparators)
],
)
return newnode
|
visit a Compare node by returning a fresh instance of it
|
def add_line(self, line):
if not self.is_valid_line(line):
logger.warn(
"Invalid line for %s section: '%s'",
self.section_name, line
)
return
self.lines.append(line)
|
Adds a given line string to the list of lines, validating the line
first.
|
def _most_common(iterable):
data = Counter(iterable)
return max(data, key=data.__getitem__)
|
Returns the most common element in `iterable`.
|
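Two small calls showing the behaviour of `_most_common` (the helper accepts any iterable of hashable items):

```python
print(_most_common(['a', 'b', 'a', 'c', 'a']))  # -> 'a'
print(_most_common('abracadabra'))              # -> 'a' (works on strings too)
```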