| code | docstring |
|---|---|
def from_config(cls, cp, variable_params):
if not cp.has_section('sampling_params'):
raise ValueError("no sampling_params section found in config file")
sampling_params, replace_parameters = \
read_sampling_params_from_config(cp)
sampling_transforms = transforms.read_transforms_from_config(
cp, 'sampling_transforms')
logging.info("Sampling in {} in place of {}".format(
', '.join(sampling_params), ', '.join(replace_parameters)))
return cls(variable_params, sampling_params,
replace_parameters, sampling_transforms)
|
Gets sampling transforms specified in a config file.
Sampling parameters and the parameters they replace are read from the
``sampling_params`` section, if it exists. Sampling transforms are
read from the ``sampling_transforms`` section(s), using
``transforms.read_transforms_from_config``.
A ``ValueError`` is raised if no ``sampling_params`` section
exists in the config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
variable_params : list
List of parameter names of the original variable params.
Returns
-------
SamplingTransforms
A sampling transforms class.
|
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.endpoint)
reactor.callFromThread(self.connector.disconnect)
log.debug("Closed socket to %s", self.endpoint)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.endpoint))
self.connected_event.set()
|
Disconnect and error-out all requests.
|
def open_bare_resource(self, resource_name,
access_mode=constants.AccessModes.no_lock,
open_timeout=constants.VI_TMO_IMMEDIATE):
return self.visalib.open(self.session, resource_name, access_mode, open_timeout)
|
Open the specified resource without wrapping into a class
:param resource_name: name or alias of the resource to open.
:param access_mode: access mode.
:type access_mode: :class:`pyvisa.constants.AccessModes`
:param open_timeout: time out to open.
:return: Unique logical identifier reference to a session.
|
def do_cprofile(func):
def profiled_func(*args, **kwargs):
profile = cProfile.Profile()
try:
profile.enable()
result = func(*args, **kwargs)
profile.disable()
return result
finally:
profile.print_stats()
return profiled_func
|
Decorator to profile a function
It gives good numbers on various function calls but it omits a vital piece
of information: what is it about a function that makes it so slow?
However, it is a great start to basic profiling. Sometimes it can even
point you to the solution with very little fuss. I often use it as a
gut check to start the debugging process before I dig deeper into the
specific functions that are either slow or called way too often.
Pros:
No external dependencies and quite fast. Useful for quick high-level
checks.
Cons:
Rather limited information that usually requires deeper debugging; reports
are a bit unintuitive, especially for complex codebases.
See also
--------
do_profile, test_do_profile
|
def infile_path(self) -> Optional[PurePath]:
if self.__infile_path:
return Path(self.__infile_path).expanduser()
return None
|
Read-only property.
:return: A ``pathlib.PurePath`` object or ``None``.
|
def GetVersionNamespace(version):
ns = nsMap[version]
if not ns:
ns = serviceNsMap[version]
versionId = versionIdMap[version]
if not versionId:
namespace = ns
else:
namespace = '%s/%s' % (ns, versionId)
return namespace
|
Get version namespace from version
|
def write_text(_command, txt_file):
command = _command.strip()
with open(txt_file, 'w') as txt:
txt.writelines(command)
|
Dump SQL command to a text file.
|
def expand_matrix_in_orthogonal_basis(
m: np.ndarray,
basis: Dict[str, np.ndarray],
) -> value.LinearDict[str]:
return value.LinearDict({
name: (hilbert_schmidt_inner_product(b, m) /
hilbert_schmidt_inner_product(b, b))
for name, b in basis.items()
})
|
Computes coefficients of expansion of m in basis.
We require that basis be orthogonal w.r.t. the Hilbert-Schmidt inner
product. We do not require that basis be orthonormal. Note that Pauli
basis (I, X, Y, Z) is orthogonal, but not orthonormal.
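A standalone sketch of the expansion rule above (coefficient = <b, m> / <b, b> under the
Hilbert-Schmidt inner product), using plain numpy and a hand-rolled inner product in place of
the helper named in the code; the matrix m is illustrative:
import numpy as np
def hs_inner(a, b):
    # Hilbert-Schmidt inner product: tr(a^dagger b)
    return np.trace(a.conj().T @ b)
pauli = {
    'I': np.eye(2, dtype=complex),
    'X': np.array([[0, 1], [1, 0]], dtype=complex),
    'Y': np.array([[0, -1j], [1j, 0]], dtype=complex),
    'Z': np.array([[1, 0], [0, -1]], dtype=complex),
}
m = np.array([[1, 2], [2, -1]], dtype=complex)
coeffs = {name: hs_inner(b, m) / hs_inner(b, b) for name, b in pauli.items()}
# m = 2*X + 1*Z, so coeffs comes out as {'I': 0, 'X': 2, 'Y': 0, 'Z': 1}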
|
def fetch_single_representation(self, item_xlink_href):
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
'pcbi': 'http://www.ploscompbiol.org/article/{0}',
'ppat': 'http://www.plospathogens.org/article/{0}',
'pntd': 'http://www.plosntds.org/article/{0}',
'pmed': 'http://www.plosmedicine.org/article/{0}',
'pbio': 'http://www.plosbiology.org/article/{0}',
'pone': 'http://www.plosone.org/article/{0}',
'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
subjournal_name = self.article.doi.split('.')[2]
base_url = journal_urls[subjournal_name]
resource = 'fetchSingleRepresentation.action?uri=' + item_xlink_href
return base_url.format(resource)
|
This function renders a formatted URL for accessing the SingleRepresentation
of an object on the PLoS server.
|
def freefn(self, item, fn, at_tail):
return c_void_p(lib.zlist_freefn(self._as_parameter_, item, fn, at_tail))
|
Set a free function for the specified list item. When the item is
destroyed, the free function, if any, is called on that item.
Use this when list items are dynamically allocated, to ensure that
you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
Returns the item, or NULL if there is no such item.
|
def animate(self, images, delay=.25):
for image in images:
self.set_image(image)
self.write_display()
time.sleep(delay)
|
Displays each of the input images in order, pausing for "delay"
seconds after each image.
Keyword arguments:
images -- An iterable collection of Image objects.
delay -- How many seconds to wait after displaying an image before
displaying the next one. (Default = .25)
|
def protract(self, x):
self.active = self.active.protract(x)
return self.active
|
Protract each of the `active` `Segments` by ``x`` seconds.
This method subtracts ``x`` from each segment's lower bound,
and adds ``x`` to the upper bound, while maintaining that each
`Segment` stays within the `known` bounds.
The :attr:`~DataQualityFlag.active` `SegmentList` is modified
in place.
Parameters
----------
x : `float`
number of seconds by which to protract each `Segment`.
|
def find(self, id):
for sprite in self.sprites:
if sprite.id == id:
return sprite
for sprite in self.sprites:
found = sprite.find(id)
if found:
return found
|
breadth-first sprite search by ID
|
def get_elastic_page_numbers(current_page, num_pages):
if num_pages <= 10:
return list(range(1, num_pages + 1))
if current_page == 1:
pages = [1]
else:
pages = ['first', 'previous']
pages.extend(_make_elastic_range(1, current_page))
if current_page != num_pages:
pages.extend(_make_elastic_range(current_page, num_pages)[1:])
pages.extend(['next', 'last'])
return pages
|
Alternative callable for page listing.
Produce an adaptive pagination, useful for big numbers of pages, by
splitting the num_pages range in two parts at current_page. Each part
will have its own S-curve.
|
def search_function(cls, encoding):
if encoding == cls._codec_name:
return codecs.CodecInfo(
name=cls._codec_name,
encode=cls.encode,
decode=cls.decode,
)
return None
|
Search function to find 'rotunicode' codec.
|
def setup_package():
import json
from setuptools import setup, find_packages
filename_setup_json = 'setup.json'
filename_description = 'README.md'
with open(filename_setup_json, 'r') as handle:
setup_json = json.load(handle)
with open(filename_description, 'r') as handle:
description = handle.read()
setup(
include_package_data=True,
packages=find_packages(),
setup_requires=['reentry'],
reentry_register=True,
long_description=description,
long_description_content_type='text/markdown',
**setup_json)
|
Setup procedure.
|
def to_json_serializable(obj):
if isinstance(obj, Entity):
return obj.to_json_dict()
if isinstance(obj, dict):
return {k: to_json_serializable(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return [to_json_serializable(v) for v in obj]
elif isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
return obj
|
Transforms obj into a json serializable object.
:param obj: entity or any json serializable object
:return: serializable object
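A minimal self-contained sketch of the same recursive conversion, omitting the Entity branch
(which depends on this codebase); note the datetime check must come before the date check
because datetime subclasses date:
from datetime import datetime, date
def to_plain(obj):
    # Recurse into containers; stringify datetimes and dates.
    if isinstance(obj, dict):
        return {k: to_plain(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [to_plain(v) for v in obj]
    if isinstance(obj, datetime):
        return obj.strftime('%Y-%m-%d %H:%M:%S')
    if isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    return obj
to_plain({'when': date(2020, 1, 2), 'tags': ('a', 'b')})
# -> {'when': '2020-01-02', 'tags': ['a', 'b']}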
|
def render_to_response(self, context):
self.setup_forms()
return TemplateResponse(
self.request, self.form_template,
context, current_app=self.admin_site.name)
|
Add the django-crispy form helper and render the template.
Returns the ``TemplateResponse`` ready to be displayed.
|
def AddAttribute(self, attribute, value=None, age=None):
if "w" not in self.mode:
raise IOError("Writing attribute %s to read only object." % attribute)
if value is None:
value = attribute
attribute = value.attribute_instance
if self.mode != "w" and attribute.lock_protected and not self.transaction:
raise IOError("Object must be locked to write attribute %s." % attribute)
self._CheckAttribute(attribute, value)
if attribute.versioned:
if attribute.creates_new_object_version:
self._new_version = True
if age:
value.age = age
else:
value.age = rdfvalue.RDFDatetime.Now()
else:
self._to_delete.add(attribute)
self.synced_attributes.pop(attribute, None)
self.new_attributes.pop(attribute, None)
value.age = 0
self._AddAttributeToCache(attribute, value, self.new_attributes)
self._dirty = True
|
Add an additional attribute to this object.
If value is None, attribute is expected to be already initialized with a
value. For example:
fd.AddAttribute(fd.Schema.CONTAINS("some data"))
Args:
attribute: The attribute name or an RDFValue derived from the attribute.
value: The value the attribute will be set to.
age: Age (timestamp) of the attribute. If None, current time is used.
Raises:
IOError: If this object is read only.
|
def listar_por_equipamento(self, id_equipamento):
if not is_valid_int_param(id_equipamento):
raise InvalidParameterError(
u'Equipment id is invalid or was not informed.')
url = 'interface/equipamento/' + str(id_equipamento) + '/'
code, map = self.submit(None, 'GET', url)
key = 'interface'
return get_list_map(self.response(code, map, [key]), key)
|
List all interfaces of an equipment.
:param id_equipamento: Equipment identifier.
:return: Dictionary with the following:
::
{'interface':
[{'protegida': < protegida >,
'nome': < nome >,
'id_ligacao_front': < id_ligacao_front >,
'id_equipamento': < id_equipamento >,
'id': < id >,
'descricao': < descricao >,
'id_ligacao_back': < id_ligacao_back >}, ... other interfaces ...]}
:raise InvalidParameterError: Equipment identifier is invalid or none.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
|
def convert_svhn(which_format, directory, output_directory,
output_filename=None):
if which_format not in (1, 2):
raise ValueError("SVHN format needs to be either 1 or 2.")
if not output_filename:
output_filename = 'svhn_format_{}.hdf5'.format(which_format)
if which_format == 1:
return convert_svhn_format_1(
directory, output_directory, output_filename)
else:
return convert_svhn_format_2(
directory, output_directory, output_filename)
|
Converts the SVHN dataset to HDF5.
Converts the SVHN dataset [SVHN] to an HDF5 dataset compatible
with :class:`fuel.datasets.SVHN`. The converted dataset is
saved as 'svhn_format_1.hdf5' or 'svhn_format_2.hdf5', depending
on the `which_format` argument.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
Parameters
----------
which_format : int
Either 1 or 2. Determines which format (format 1: full numbers
or format 2: cropped digits) to convert.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5' or
'svhn_format_2.hdf5', depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
def get_issuer_keys(self, issuer):
res = []
for kbl in self.issuer_keys[issuer]:
res.extend(kbl.keys())
return res
|
Get all the keys that belong to an entity.
:param issuer: The entity ID
:return: A possibly empty list of keys
|
def update_config(self, config, timeout=-1):
return self._client.update(config, uri=self.URI + "/config", timeout=timeout)
|
Updates the remote server configuration and the automatic backup schedule.
Args:
config (dict): Object to update.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Backup details.
|
def path(args):
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.paths(args.id, prefix=args.directory, suffix=args.extension)
for path in r: output.write('%s\n' % path)
if not r: return 1
return 0
|
Returns a list of fully formed paths or stems given some file id
|
def _bse_cli_get_basis(args):
return api.get_basis(
name=args.basis,
elements=args.elements,
version=args.version,
fmt=args.fmt,
uncontract_general=args.unc_gen,
uncontract_spdf=args.unc_spdf,
uncontract_segmented=args.unc_seg,
make_general=args.make_gen,
optimize_general=args.opt_gen,
data_dir=args.data_dir,
header=not args.noheader)
|
Handles the get-basis subcommand
|
def feature_passthrough(early_feat, late_feat, filters, name, kernel_size=(1, 1)):
_, h_early, w_early, c_early = early_feat.get_shape().as_list()
_, h_late, w_late, c_late = late_feat.get_shape().as_list()
s_x = int(w_early / w_late)
s_y = int(h_early / h_late)
assert h_late * s_y == h_early and w_late * s_x == w_early
with tf.variable_scope(name) as scope:
early_conv = tf.layers.conv2d(early_feat, filters=filters, kernel_size=(s_x * kernel_size[0], s_y * kernel_size[1]), strides=(s_x, s_y), padding="same")
late_conv = tf.layers.conv2d(late_feat, filters=filters, kernel_size=kernel_size, strides=(1, 1), padding="same")
return early_conv + late_conv
|
A feature passthrough layer inspired by yolo9000 and the inverse tiling layer.
It can be proven that this layer does the same as conv(concat(inverse_tile(early_feat), late_feat)).
This layer has no activation function.
:param early_feat: The early feature layer of shape [batch_size, h * s_y, w * s_x, _].
s_x and s_y are integers computed internally describing the scale between the layers.
:param late_feat: The late feature layer of shape [batch_size, h, w, _].
:param filters: The number of convolution filters.
:param name: The name of the layer.
:param kernel_size: The size of the kernel. Default (1x1).
:return: The output tensor of shape [batch_size, h, w, outputs]
|
def handle_endtag(self, tagName):
inTag = self._inTag
try:
foundIt = False
for i in range(len(inTag)):
if inTag[i].tagName == tagName:
foundIt = True
break
if not foundIt:
sys.stderr.write('WARNING: found close tag with no matching start.\n')
return
while inTag[-1].tagName != tagName:
oldTag = inTag.pop()
if oldTag.tagName in PREFORMATTED_TAGS:
self.inPreformatted -= 1
self.currentIndentLevel -= 1
inTag.pop()
if tagName != INVISIBLE_ROOT_TAG:
self.currentIndentLevel -= 1
if tagName in PREFORMATTED_TAGS:
self.inPreformatted -= 1
except:
pass
|
handle_endtag - Internal for parsing
|
def encode_timeseries_put(self, tsobj):
if tsobj.columns:
raise NotImplementedError('columns are not used')
if tsobj.rows and isinstance(tsobj.rows, list):
req_rows = []
for row in tsobj.rows:
req_r = []
for cell in row:
req_r.append(self.encode_to_ts_cell(cell))
req_rows.append(tuple(req_r))
req = tsputreq_a, tsobj.table.name, [], req_rows
mc = MSG_CODE_TS_TTB_MSG
rc = MSG_CODE_TS_TTB_MSG
return Msg(mc, encode(req), rc)
else:
raise RiakError("TsObject requires a list of rows")
|
Returns an Erlang-TTB encoded tuple with the appropriate data and
metadata from a TsObject.
:param tsobj: a TsObject
:type tsobj: TsObject
:rtype: term-to-binary encoded object
|
def normalize_name(name, overrides=None):
normalized_name = name.title()
if overrides:
override_map = dict([(name.title(), name) for name in overrides])
return override_map.get(normalized_name, normalized_name)
else:
return normalized_name
|
Normalize the key name to title case.
For example, ``normalize_name('content-id')`` will become ``Content-Id``
Args:
name (str): The name to normalize.
overrides (set, sequence): A set or sequence containing keys that
should be cased to themselves. For example, passing
``{'WARC-Type'}`` will normalize any key named "warc-type" to
``WARC-Type`` instead of the default ``Warc-Type``.
Returns:
str
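A short usage sketch of the override behaviour, calling the function defined in this entry
(the header names are illustrative):
overrides = {'WARC-Type', 'Content-MD5'}
normalize_name('content-id')              # -> 'Content-Id'
normalize_name('warc-type', overrides)    # -> 'WARC-Type'
normalize_name('content-md5', overrides)  # -> 'Content-MD5'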
|
def compute_bbox_with_margins(margin, x, y):
'Helper function to compute bounding box for the plot'
pos = np.asarray((x, y))
minxy, maxxy = pos.min(axis=1), pos.max(axis=1)
xy1 = minxy - margin*(maxxy - minxy)
xy2 = maxxy + margin*(maxxy - minxy)
return tuple(xy1), tuple(xy2)
|
Helper function to compute bounding box for the plot
|
def set_value(self, value, force=False):
if force:
self._value = value
return
if value is None:
self._value = value
return
if isinstance(value, six.integer_types):
self._value = value
return
if isinstance(value, six.string_types):
for v, n in self.enums.items():
if n == value:
self._value = v
return
raise ValueError("Unable to find value name in enum list")
raise TypeError(
"Value for '%s' must by of type String or Integer not '%s'" % (
self.name,
type(value)
)
)
|
Set the value.
:param String|Integer value: The value to set. Must be in the enum list.
:param Boolean force: Set the value without checking it
:raises ValueError: If value name given but it isn't available
:raises TypeError: If value is not String or Integer
|
def verify(self, payload):
if not self.authenticator:
return payload
try:
self.authenticator.auth(payload)
return self.authenticator.unsigned(payload)
except AuthenticatorInvalidSignature:
raise
except Exception as exception:
raise AuthenticateError(str(exception))
|
Verify payload authenticity via the supplied authenticator
|
def maybe_create_placement_group(name='', max_retries=10):
if not name:
return
client = get_ec2_client()
while True:
try:
client.describe_placement_groups(GroupNames=[name])
print("Reusing placement_group group: " + name)
break
except Exception:
print("Creating placement_group group: " + name)
try:
_response = client.create_placement_group(GroupName=name,
Strategy='cluster')
except Exception:
pass
counter = 0
while True:
try:
res = client.describe_placement_groups(GroupNames=[name])
res_entry = res['PlacementGroups'][0]
if res_entry['State'] == 'available':
assert res_entry['Strategy'] == 'cluster'
break
except Exception as e:
print("Got exception: %s" % (e,))
counter += 1
if counter >= max_retries:
assert False, f'Failed to create placement_group group {name} in {max_retries} attempts'
time.sleep(RETRY_INTERVAL_SEC)
|
Creates a placement group or reuses an existing one. Crashes if unable to create the
placement group. If name is empty, the request is ignored.
|
def delete(self, photo_id, album_id=0):
if isinstance(photo_id, Info):
photo_id = photo_id.id
return self._session.okc_post('photoupload', data={
'albumid': album_id,
'picid': photo_id,
'authcode': self._authcode,
'picture.delete_ajax': 1
})
|
Delete a photo from the logged-in user's account.
:param photo_id: The okcupid id of the photo to delete.
:param album_id: The album from which to delete the photo.
|
async def certify(client: Client, certification_signed_raw: str) -> ClientResponse:
return await client.post(MODULE + '/certify', {'cert': certification_signed_raw}, rtype=RESPONSE_AIOHTTP)
|
POST certification raw document
:param client: Client to connect to the api
:param certification_signed_raw: Certification raw document
:return:
|
def _build_date_time_time_zone(self, date_time):
timezone = date_time.tzinfo.zone if date_time.tzinfo is not None else None
return {
self._cc('dateTime'): date_time.strftime('%Y-%m-%dT%H:%M:%S'),
self._cc('timeZone'): get_windows_tz(timezone or self.protocol.timezone)
}
|
Converts a datetime to a dateTimeTimeZone resource
|
def serialize_to_normalized_pretty_json(py_obj):
return json.dumps(py_obj, sort_keys=True, indent=2, cls=ToJsonCompatibleTypes)
|
Serialize a native object to normalized, pretty printed JSON.
The JSON string is normalized by sorting any dictionary keys.
Args:
py_obj: object
Any object that can be represented in JSON. Some types, such as datetimes are
automatically converted to strings.
Returns:
str: normalized, pretty printed JSON string.
|
def from_dict(cls, d):
name = PartitionIdentity._name_class(**d)
if 'id' in d and 'revision' in d:
on = (ObjectNumber.parse(d['id']).rev(d['revision']))
elif 'vid' in d:
on = ObjectNumber.parse(d['vid'])
else:
raise ValueError("Must have id and revision, or vid")
try:
return PartitionIdentity(name, on)
except TypeError as e:
raise TypeError(
"Failed to make identity from \n{}\n: {}".format(
d,
e.message))
|
Like Identity.from_dict, but will cast the class type based on the
format. i.e. if the format is hdf, return an HdfPartitionIdentity.
:param d:
:return:
|
def load_image(filename, timeout=120):
c = docker_fabric()
with open(expand_path(filename), 'r') as f:
_timeout = c._timeout
c._timeout = timeout
try:
c.load_image(f)
finally:
c._timeout = _timeout
|
Uploads an image from a local file to a Docker remote. Note that this temporarily has to extend the service timeout
period.
:param filename: Local file name.
:type filename: unicode
:param timeout: Timeout in seconds to set temporarily for the upload.
:type timeout: int
|
def create_app(self):
utils.banner("Creating Spinnaker App")
spinnakerapp = app.SpinnakerApp(app=self.app, email=self.email, project=self.group, repo=self.repo,
pipeline_config=self.configs['pipeline'])
spinnakerapp.create_app()
|
Create the spinnaker application.
|
def _diff_bounds(bounds, coord):
try:
return bounds[:, 1] - bounds[:, 0]
except IndexError:
diff = np.diff(bounds, axis=0)
return xr.DataArray(diff, dims=coord.dims, coords=coord.coords)
|
Get grid spacing by subtracting upper and lower bounds.
|
def get_fba_flux(self, objective):
flux_result = self.solve_fba(objective)
fba_fluxes = {}
for key in self._model.reactions:
fba_fluxes[key] = flux_result.get_value(self._v_wt[key])
return fba_fluxes
|
Return a dictionary of all the fluxes solved by FBA.
Dictionary of fluxes is used in :meth:`.lin_moma` and :meth:`.moma`
to minimize changes in the flux distributions following model
perturbation.
Args:
objective: The objective reaction that is maximized.
Returns:
Dictionary of fluxes for each reaction in the model.
|
def current_bed_temp(self):
try:
bedtemps = self.intervals[0]['timeseries']['tempBedC']
num_temps = len(bedtemps)
if num_temps == 0:
return None
bedtemp = bedtemps[num_temps-1][1]
except KeyError:
bedtemp = None
return bedtemp
|
Return current bed temperature for in-progress session.
|
def length(self):
return np.sqrt(np.sum(self**2, axis=1)).view(np.ndarray)
|
Array of vector lengths
|
def defaults(default=None):
def _f(func):
@functools.wraps(func)
def __f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception:
return default
return __f
return _f
|
Catches any exception thrown by the wrapped function and returns `default`
instead.
Parameters
----------
default : object
The default value to return if the wrapped function throws an exception
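A brief usage sketch (the class and method are made up) showing how the decorator above turns
an exception into the supplied default; it assumes functools is imported where the decorator lives:
class Parser:
    @defaults(default=0)
    def count(self, text):
        return int(text)  # raises ValueError for non-numeric input
p = Parser()
p.count("42")   # -> 42
p.count("n/a")  # -> 0, the exception is swallowed and the default returned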
|
def serialise(self, element: Element) -> str:
return json.dumps(self.serialise_element(element))
|
Serialises the given element into Compact JSON.
>>> CompactJSONSerialiser().serialise(String(content='Hello'))
'["string", null, null, "Hello"]'
|
def _enforceDataType(self, data):
idx = int(data)
if idx < 0:
idx += len(self._displayValues)
assert 0 <= idx < len(self._displayValues), \
"Index should be >= 0 and < {}. Got {}".format(len(self._displayValues), idx)
return idx
|
Converts to int so that this CTI always stores that type.
The data may be set to a negative value, e.g. use -1 to select the last item
by default. However, it will be converted to a positive index by this method.
|
def send(self, text):
if text:
self.send_buffer += text.replace('\n', '\r\n')
self.send_pending = True
|
Send raw text to the distant end.
|
def replace(self, key, value, expire=0, noreply=None):
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'replace', {key: value}, expire, noreply)[key]
|
The memcached "replace" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If noreply is True, always returns True. Otherwise returns True if
the value was stored and False if it wasn't (because the key didn't
already exist).
|
def prep_directory(self, target_dir):
dirname = path.dirname(target_dir)
if dirname:
dirname = path.join(settings.BUILD_DIR, dirname)
if not self.fs.exists(dirname):
logger.debug("Creating directory at {}{}".format(self.fs_name, dirname))
self.fs.makedirs(dirname)
|
Prepares a new directory to store the file at the provided path, if needed.
|
def build(self):
markdown_html = markdown.markdown(self.markdown_text, extensions=[
TocExtension(), 'fenced_code', 'markdown_checklist.extension',
'markdown.extensions.tables'])
markdown_soup = BeautifulSoup(markdown_html, 'html.parser')
if markdown_soup.find('code', attrs={'class': 'mermaid'}):
self._add_mermaid_js()
for dot_tag in markdown_soup.find_all('code', attrs={'class': 'dotgraph'}):
grap_svg = self._text_to_graphiz(dot_tag.string)
graph_soup = BeautifulSoup(grap_svg, 'html.parser')
dot_tag.parent.replaceWith(graph_soup)
self.main_soup.body.append(markdown_soup)
return self.main_soup.prettify()
|
Convert Markdown text to HTML. Return the HTML document as a string.
|
def package_locations(self, package_keyname):
mask = "mask[description, keyname, locations]"
package = self.get_package_by_key(package_keyname, mask='id')
regions = self.package_svc.getRegions(id=package['id'], mask=mask)
return regions
|
List datacenter locations for a package keyname
:param str package_keyname: The package for which to get the items.
:returns: List of locations a package is orderable in
|
def search_form(context, search_model_names=None):
template_vars = {
"request": context["request"],
}
if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
search_model_names = []
elif search_model_names == "all":
search_model_names = list(settings.SEARCH_MODEL_CHOICES)
else:
search_model_names = search_model_names.split(" ")
search_model_choices = []
for model_name in search_model_names:
try:
model = apps.get_model(*model_name.split(".", 1))
except LookupError:
pass
else:
verbose_name = model._meta.verbose_name_plural.capitalize()
search_model_choices.append((verbose_name, model_name))
template_vars["search_model_choices"] = sorted(search_model_choices)
return template_vars
|
Includes the search form with a list of models to use as choices
for filtering the search by. Models should be a string with models
in the format ``app_label.model_name`` separated by spaces. The
string ``all`` can also be used, in which case the models defined
by the ``SEARCH_MODEL_CHOICES`` setting will be used.
|
def merge_elisions(elided: List[str]) -> str:
results = list(elided[0])
for line in elided:
for idx, car in enumerate(line):
if car == " ":
results[idx] = " "
return "".join(results)
|
Given a list of strings with different space swapping elisions applied, merge the elisions,
taking the most without compounding the omissions.
:param elided:
:return:
>>> merge_elisions([
... "ignavae agua multum hiatus", "ignav agua multum hiatus" ,"ignavae agua mult hiatus"])
'ignav agua mult hiatus'
|
async def save(self):
old_tags = list(self._orig_data['tags'])
new_tags = list(self.tags)
self._changed_data.pop('tags', None)
await super(BlockDevice, self).save()
for tag_name in new_tags:
if tag_name not in old_tags:
await self._handler.add_tag(
system_id=self.node.system_id, id=self.id, tag=tag_name)
else:
old_tags.remove(tag_name)
for tag_name in old_tags:
await self._handler.remove_tag(
system_id=self.node.system_id, id=self.id, tag=tag_name)
self._orig_data['tags'] = new_tags
self._data['tags'] = list(new_tags)
|
Save this block device.
|
def nice_size(size):
unit = 'B'
if size > 1024:
size /= 1024.0
unit = 'KB'
if size > 1024:
size /= 1024.0
unit = 'MB'
if size > 1024:
size /= 1024.0
unit = 'GB'
return '%s %s' % (nice_number(size, max_ndigits_after_dot=2), unit)
|
Nice size.
:param size: the size.
:type size: int
:return: a nicely printed size.
:rtype: string
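A standalone sketch of the same repeated divide-by-1024 idea, with plain '%.2f' formatting in
place of the nice_number helper used above:
def human_size(size):
    # Walk up the unit ladder while the value is still >= 1024.
    for unit in ('B', 'KB', 'MB', 'GB'):
        if size < 1024 or unit == 'GB':
            return '%.2f %s' % (size, unit)
        size /= 1024.0
human_size(5 * 1024 * 1024)  # -> '5.00 MB'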
|
def vinet_p(v, v0, k0, k0p):
return cal_p_vinet(v, [v0, k0, k0p],
uncertainties=isuncertainties([v, v0, k0, k0p]))
|
Calculate pressure from the Vinet equation.
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:return: pressure in GPa
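For reference, a standalone sketch of the Vinet form itself, independent of the cal_p_vinet
helper used above and assuming the standard parameterisation
P = 3*K0*(1 - X)/X**2 * exp(1.5*(K0' - 1)*(1 - X)) with X = (V/V0)**(1/3);
the numbers in the call are purely illustrative:
import numpy as np
def vinet_pressure(v, v0, k0, k0p):
    # X is the cube root of the compression ratio V/V0.
    x = (v / v0) ** (1.0 / 3.0)
    return 3.0 * k0 * (1.0 - x) / x ** 2 * np.exp(1.5 * (k0p - 1.0) * (1.0 - x))
vinet_pressure(60.0, 74.7, 160.0, 4.0)  # compression of an MgO-like solid, result in GPa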
|
def copy(self, old_name, new_name, index=None):
if index is None:
index = len(self)
self._copy(old_name, new_name, index)
return self[new_name]
|
Copies an old sheet with the old_name to a new sheet with new_name.
If an optional index argument is not provided then the created
sheet is appended at the end. Returns the new sheet.
|
def get_build_scanner_path(self, scanner):
env = self.get_build_env()
try:
cwd = self.batches[0].targets[0].cwd
except (IndexError, AttributeError):
cwd = None
return scanner.path(env, cwd,
self.get_all_targets(),
self.get_all_sources())
|
Fetch the scanner path for this executor's targets and sources.
|
def get_variable_grammar(self):
word_expr = Word(alphanums + '_' + '-')
word_expr2 = Word(initChars=printables, excludeChars=['{', '}', ',', ' '])
name_expr = Suppress('variable') + word_expr + Suppress('{')
state_expr = ZeroOrMore(word_expr2 + Optional(Suppress(",")))
variable_state_expr = Suppress('type') + Suppress(word_expr) + Suppress('[') + Suppress(Word(nums)) + \
Suppress(']') + Suppress('{') + Group(state_expr) + Suppress('}') + Suppress(';')
property_expr = Suppress('property') + CharsNotIn(';') + Suppress(';')
return name_expr, variable_state_expr, property_expr
|
A method that returns variable grammar
|
def process_sparser_output(output_fname, output_fmt='json'):
if output_fmt not in ['json', 'xml']:
logger.error("Unrecognized output format '%s'." % output_fmt)
return None
sp = None
with open(output_fname, 'rt') as fh:
if output_fmt == 'json':
json_dict = json.load(fh)
sp = process_json_dict(json_dict)
else:
xml_str = fh.read()
sp = process_xml(xml_str)
return sp
|
Return a processor with Statements extracted from Sparser XML or JSON
Parameters
----------
output_fname : str
The path to the Sparser output file to be processed. The file can
either be JSON or XML output from Sparser, with the output_fmt
parameter defining what format is assumed to be processed.
output_fmt : Optional[str]
The format of the Sparser output to be processed, can either be
'json' or 'xml'. Default: 'json'
Returns
-------
sp : SparserXMLProcessor or SparserJSONProcessor depending on what output
format was chosen.
|
def init_live_reload(run):
from asyncio import get_event_loop
from ._live_reload import start_child
loop = get_event_loop()
if run:
loop.run_until_complete(start_child())
else:
get_event_loop().create_task(start_child())
|
Start the live reload task
:param run: run the task inside of this function or just create it
|
def check(cls, dap, network=False, yamls=True, raises=False, logger=logger):
dap._check_raises = raises
dap._problematic = False
dap._logger = logger
problems = list()
problems += cls.check_meta(dap)
problems += cls.check_no_self_dependency(dap)
problems += cls.check_topdir(dap)
problems += cls.check_files(dap)
if yamls:
problems += cls.check_yamls(dap)
if network:
problems += cls.check_name_not_on_dapi(dap)
for problem in problems:
dap._report_problem(problem.message, problem.level)
del dap._check_raises
return not dap._problematic
|
Checks if the dap is valid, reports problems
Parameters:
network -- whether to run checks that require a network connection
yamls -- whether to check the dap's YAML files
raises -- whether to raise an exception immediately after a problem is detected
logger -- logger used to report problems
|
def update_cors_configuration(
self,
enable_cors=True,
allow_credentials=True,
origins=None,
overwrite_origins=False):
if origins is None:
origins = []
cors_config = {
'enable_cors': enable_cors,
'allow_credentials': allow_credentials,
'origins': origins
}
if overwrite_origins:
return self._write_cors_configuration(cors_config)
old_config = self.cors_configuration()
updated_config = old_config.copy()
updated_config['enable_cors'] = cors_config.get('enable_cors')
updated_config['allow_credentials'] = cors_config.get('allow_credentials')
if cors_config.get('origins') == ["*"]:
updated_config['origins'] = ["*"]
elif old_config.get('origins') != cors_config.get('origins'):
new_origins = list(
set(old_config.get('origins')).union(
set(cors_config.get('origins')))
)
updated_config['origins'] = new_origins
return self._write_cors_configuration(updated_config)
|
Merges existing CORS configuration with updated values.
:param bool enable_cors: Enables/disables CORS. Defaults to True.
:param bool allow_credentials: Allows authentication credentials.
Defaults to True.
:param list origins: List of allowed CORS origin(s). Special cases are
a list containing a single "*" which will allow any origin and
an empty list which will not allow any origin. Defaults to None.
:param bool overwrite_origins: Dictates whether the origins list is
overwritten or appended to. Defaults to False.
:returns: CORS configuration update status in JSON format
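A small standalone sketch of the origin-merging rule described above (a wildcard list wins
outright; otherwise old and new origins are unioned); the helper name is illustrative:
def merge_origins(old, new):
    if new == ['*']:
        return ['*']
    return sorted(set(old) | set(new))
merge_origins(['https://a.example'], ['https://b.example'])
# -> ['https://a.example', 'https://b.example']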
|
def get_instance():
resources = []
env_resource = resource.get_from_env()
if env_resource is not None:
resources.append(env_resource)
if k8s_utils.is_k8s_environment():
resources.append(resource.Resource(
_K8S_CONTAINER, k8s_utils.get_k8s_metadata()))
if is_gce_environment():
resources.append(resource.Resource(
_GCE_INSTANCE,
gcp_metadata_config.GcpMetadataConfig().get_gce_metadata()))
elif is_aws_environment():
resources.append(resource.Resource(
_AWS_EC2_INSTANCE,
(aws_identity_doc_utils.AwsIdentityDocumentUtils()
.get_aws_metadata())))
if not resources:
return None
return resource.merge_resources(resources)
|
Get a resource based on the application environment.
Returns a `Resource` configured for the current environment, or None if the
environment is unknown or unsupported.
:rtype: :class:`opencensus.common.resource.Resource` or None
:return: A `Resource` configured for the current environment.
|
def group_channels(self, group):
path = self._path(group)
return [
self.objects[p]
for p in self.objects
if p.startswith(path + '/')]
|
Returns a list of channel objects for the given group
:param group: Name of the group to get channels for.
:rtype: List of :class:`TdmsObject` objects.
|
def tokenize(s):
s = re.sub(r'(?a)(\w+)\'s', r'\1', s)
split_pattern = r'[{} ]+'.format(re.escape(STOPCHARS))
tokens = [token for token in re.split(split_pattern, s) if not set(token) <= set(string.punctuation)]
return tokens
|
A simple tokenizer.
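A standalone sketch of the same two-step idea (strip possessive 's, then split on stop
characters and drop punctuation-only tokens); the STOPCHARS value here is illustrative, not the
module's actual constant:
import re
import string
STOPCHARS = '.,;:!?()[]"'  # illustrative stand-in
def simple_tokenize(s):
    s = re.sub(r"(?a)(\w+)'s", r'\1', s)  # drop possessive 's
    split_pattern = r'[{} ]+'.format(re.escape(STOPCHARS))
    return [t for t in re.split(split_pattern, s)
            if t and not set(t) <= set(string.punctuation)]
simple_tokenize("Alice's cat, the (fast) one")
# -> ['Alice', 'cat', 'the', 'fast', 'one']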
|
def ls(manager: Manager, offset: Optional[int], limit: Optional[int]):
q = manager.session.query(Edge)
if offset:
q = q.offset(offset)
if limit is not None and limit > 0:
q = q.limit(limit)
for e in q:
click.echo(e.bel)
|
List edges.
|
def register_phone_view(request):
if request.method == "POST":
form = PhoneRegistrationForm(request.POST)
logger.debug(form)
if form.is_valid():
obj = form.save()
obj.user = request.user
obj.save()
messages.success(request, "Successfully added phone.")
return redirect("itemreg")
else:
messages.error(request, "Error adding phone.")
else:
form = PhoneRegistrationForm()
return render(request, "itemreg/register_form.html", {"form": form, "action": "add", "type": "phone", "form_route": "itemreg_phone"})
|
Register a phone.
|
def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
_raise_error_evaluation_metric_is_valid(metric,
['auto', 'rmse', 'max_error'])
return super(LinearRegression, self).evaluate(dataset, missing_value_action=missing_value_action,
metric=metric)
|
r"""Evaluate the model by making target value predictions and comparing
to actual values.
Two metrics are used to evaluate linear regression models. The first
is root-mean-squared error (RMSE) while the second is the absolute
value of the maximum error between the actual and predicted values.
Let :math:`y` and :math:`\hat{y}` denote vectors of length :math:`N`
(number of examples) with actual and predicted values. The RMSE is
defined as:
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2}
while the max-error is defined as
.. math::
max-error = \max_{i=1}^N \|\widehat{y}_i - y_i\|
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto': Compute all metrics.
- 'rmse': Root mean squared error.
- 'max_error': Maximum error.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
Results from model evaluation procedure.
See Also
----------
create, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data,
target='price',
features=['bath', 'bedroom', 'size'])
>>> results = model.evaluate(data)
|
def remove_custom_field_setting(self, project, params={}, **options):
path = "/projects/%s/removeCustomFieldSetting" % (project)
return self.client.post(path, params, **options)
|
Remove a custom field setting on the project.
Parameters
----------
project : {Id} The project from which to remove the custom field setting
[data] : {Object} Data for the request
- [custom_field] : {Id} The id of the custom field to remove from this project.
|
def setup_app(command, conf, vars):
if not pylons.test.pylonsapp:
load_environment(conf.global_conf, conf.local_conf)
|
Place any commands to setup nipapwww here
|
def get_current_scene_node():
c = cmds.namespaceInfo(':', listOnlyDependencyNodes=True, absoluteName=True, dagPath=True)
l = cmds.ls(c, type='jb_sceneNode', absoluteName=True)
if not l:
return
else:
for n in sorted(l):
if not cmds.listConnections("%s.reftrack" % n, d=False):
return n
|
Return the name of the jb_sceneNode that describes the current scene, or None if there is no scene node.
:returns: the full name of the node, or None if there is no scene node
:rtype: str | None
:raises: None
|
def _find_statistics(X, y, variogram_function,
variogram_model_parameters, coordinates_type):
delta = np.zeros(y.shape)
sigma = np.zeros(y.shape)
for i in range(y.shape[0]):
if i == 0:
continue
else:
k, ss = _krige(X[:i, :], y[:i], X[i, :], variogram_function,
variogram_model_parameters, coordinates_type)
if np.absolute(ss) < eps:
continue
delta[i] = y[i] - k
sigma[i] = np.sqrt(ss)
delta = delta[sigma > eps]
sigma = sigma[sigma > eps]
epsilon = delta/sigma
return delta, sigma, epsilon
|
Calculates variogram fit statistics.
Returns the delta, sigma, and epsilon values for the variogram fit.
These arrays are used for statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
delta: ndarray
residuals between observed values and kriged estimates for those values
sigma: ndarray
mean error in kriging estimates
epsilon: ndarray
residuals normalized by their mean error
|
def query_decl(self, **kwargs):
return self.session.query(Decl).filter_by(**kwargs).all()
|
Query declarations.
|
def variant(self):
variant = current_app.config['THEME_VARIANT']
if variant not in self.variants:
log.warning('Unknown theme variant: %s', variant)
return 'default'
else:
return variant
|
Get the current theme variant
|
def _get_value(obj, key):
if isinstance(obj, (list, tuple)):
for item in obj:
v = _find_value(key, item)
if v is not None:
return v
return None
if isinstance(obj, dict):
return obj.get(key)
if obj is not None:
return getattr(obj, key, None)
|
Get a value for 'key' from 'obj', if possible
|
def _arg(__decorated__, **Config):
if isinstance(__decorated__, tuple):
__decorated__[1].insert(0, Config)
return __decorated__
else:
return __decorated__, [Config]
|
r"""The worker for the arg decorator.
|
def ListGrrBinaries(context=None):
items = context.SendIteratorRequest("ListGrrBinaries", None)
return utils.MapItemsIterator(
lambda data: GrrBinary(data=data, context=context), items)
|
Lists all registered Grr binaries.
|
def _ListTags(self):
all_tags = []
self._cursor.execute('SELECT DISTINCT tag FROM log2timeline')
tag_row = self._cursor.fetchone()
while tag_row:
tag_string = tag_row[0]
if tag_string:
tags = tag_string.split(',')
for tag in tags:
if tag not in all_tags:
all_tags.append(tag)
tag_row = self._cursor.fetchone()
return all_tags
|
Query database for unique tag types.
|
def hover(self, target=None):
if target is None:
target = self._lastMatch or self
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("hover expected Pattern, String, Match, Region, or Location object")
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
|
Moves the cursor to the target location
|
def wrap_resource(self, pool, resource_wrapper):
resource = resource_wrapper(self.resource, pool)
self._weakref = weakref.ref(resource)
return resource
|
Return a resource wrapped in ``resource_wrapper``.
:param pool: A pool instance.
:type pool: :class:`CuttlePool`
:param resource_wrapper: A wrapper class for the resource.
:type resource_wrapper: :class:`Resource`
:return: A wrapped resource.
:rtype: :class:`Resource`
|
def get_nodes(self, resolve_missing=False):
result = []
resolved = False
for node_id in self._node_ids:
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is not None:
result.append(node)
continue
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
if resolved:
raise exception.DataIncomplete("Unable to resolve all nodes")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"node(w);\n"
"out body;\n"
)
query = query.format(
way_id=self.id
)
tmp_result = self._result.api.query(query)
self._result.expand(tmp_result)
resolved = True
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is None:
raise exception.DataIncomplete("Unable to resolve all nodes")
result.append(node)
return result
|
Get the nodes defining the geometry of the way
:param resolve_missing: Try to resolve missing nodes.
:type resolve_missing: Boolean
:return: List of nodes
:rtype: List of overpy.Node
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
|
def return_single_convert_numpy_base(dbpath, folder_path, set_object, object_id, converter, add_args=None):
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(set_object).get(object_id)
session.close()
if add_args is None:
return converter(join(folder_path, tmp_object.path))
else:
return converter(join(folder_path, tmp_object.path), add_args)
|
Generic function which converts an object specified by the object_id into a numpy array and returns the
array; the conversion is done by the 'converter' function.
Parameters
----------
dbpath : string, path to SQLite database file
folder_path : string, path to folder where the files are stored
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
converter : function, which takes the path of a data point and *args as parameters and returns a numpy array
add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the
converter should take only one input argument - the file path. default value: None
Returns
-------
result : ndarray
|
def serializable_value(self, obj):
value = self.__get__(obj, obj.__class__)
return self.property.serialize_value(value)
|
Produce the value as it should be serialized.
Sometimes it is desirable for the serialized value to differ from
the ``__get__`` in order for the ``__get__`` value to appear simpler
for user or developer convenience.
Args:
obj (HasProps) : the object to get the serialized attribute for
Returns:
JSON-like
|
def create_option_from_value(tag, value):
dhcp_option.parser()
fake_opt = dhcp_option(tag = tag)
for c in dhcp_option.subclasses:
if c.criteria(fake_opt):
if hasattr(c, '_parse_from_value'):
return c(tag = tag, value = c._parse_from_value(value))
else:
raise ValueError('Invalid DHCP option ' + str(tag) + ": " + repr(value))
else:
fake_opt._setextra(_tobytes(value))
return fake_opt
|
Set DHCP option with human friendly value
|
def inspect_task(self, task):
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True)
|
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
def _remove_pre_formatting(self):
preformatted_wrappers = [
'pre',
'code'
]
for wrapper in preformatted_wrappers:
for formatter in FORMATTERS:
tag = FORMATTERS[formatter]
character = formatter
regex = r'(<{w}>.*)<{t}>(.*)</{t}>(.*</{w}>)'.format(
t=tag,
w=wrapper
)
repl = r'\g<1>{c}\g<2>{c}\g<3>'.format(c=character)
self.cleaned_html = re.sub(regex, repl, self.cleaned_html)
|
Removes formatting tags added to pre elements.
|
def get_filename(request, date, size_x, size_y):
filename = '_'.join([
str(request.service_type.value),
request.layer,
str(request.bbox.crs),
str(request.bbox).replace(',', '_'),
'' if date is None else date.strftime("%Y-%m-%dT%H-%M-%S"),
'{}X{}'.format(size_x, size_y)
])
filename = OgcImageService.filename_add_custom_url_params(filename, request)
return OgcImageService.finalize_filename(filename, request.image_format)
|
Get filename location
Returns the filename's location on disk where data is or is going to be stored.
The files are stored in the folder specified by the user when initialising OGC-type
of request. The name of the file has the following structure:
{service_type}_{layer}_{crs}_{bbox}_{time}_{size_x}X{size_y}_*{custom_url_params}.{image_format}
In case of `TIFF_d32f` a `'_tiff_depth32f'` is added at the end of the filename (before format suffix)
to differentiate it from 16-bit float tiff.
:param request: OGC-type request with specified bounding box, cloud coverage for specific product.
:type request: OgcRequest or GeopediaRequest
:param date: acquisition date or None
:type date: datetime.datetime or None
:param size_x: horizontal image dimension
:type size_x: int or str
:param size_y: vertical image dimension
:type size_y: int or str
:return: filename for this request and date
:rtype: str
|
def _counts2radiance(self, counts, coefs, channel):
logger.debug('Converting counts to radiance')
if self._is_vis(channel):
slope = np.array(coefs['slope']).mean()
offset = np.array(coefs['offset']).mean()
return self._viscounts2radiance(counts=counts, slope=slope,
offset=offset)
return self._ircounts2radiance(counts=counts, scale=coefs['scale'],
offset=coefs['offset'])
|
Convert raw detector counts to radiance
|
def upload_entities_tsv(namespace, workspace, entities_tsv):
if isinstance(entities_tsv, string_types):
with open(entities_tsv, "r") as tsv:
entity_data = tsv.read()
elif isinstance(entities_tsv, io.StringIO):
entity_data = entities_tsv.getvalue()
else:
raise ValueError('Unsupported input type.')
return upload_entities(namespace, workspace, entity_data)
|
Upload entities from a tsv loadfile.
File-based wrapper for api.upload_entities().
A loadfile is a tab-separated text file with a header row
describing entity type and attribute names, followed by
rows of entities and their attribute values.
Ex:
entity:participant_id age alive
participant_23 25 Y
participant_27 35 N
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
entities_tsv (file): FireCloud loadfile, see format above
|
def __output_unpaired_vals(d_vals, used_ff_keys, f_f_header, sf_d, s_f_header,
missing_val, out_handler, outfh, delim="\t"):
if missing_val is None:
raise MissingValueError("Need missing value to output " +
" unpaired lines")
for k in d_vals:
if k not in used_ff_keys:
f_f_flds = d_vals[k]
if s_f_header is not None:
s_f_flds = [dict(zip(s_f_header, [missing_val] * len(s_f_header)))]
else:
s_f_num_cols = len(sf_d[d_vals.keys()[0]][0])
s_f_flds = [[missing_val] * s_f_num_cols]
out_handler.write_output(outfh, delim, s_f_flds, f_f_flds,
s_f_header, f_f_header)
|
Use an output handler to output keys that could not be paired.
Go over the keys in d_vals and for any that were not used (i.e. not in
used_ff_keys), build an output line using the values from d_vals,
populate the missing columns with missing_val, and output these using the
provided output handler.
|
def display_completions_like_readline(event):
b = event.current_buffer
if b.completer is None:
return
complete_event = CompleteEvent(completion_requested=True)
completions = list(b.completer.get_completions(b.document, complete_event))
common_suffix = get_common_complete_suffix(b.document, completions)
if len(completions) == 1:
b.delete_before_cursor(-completions[0].start_position)
b.insert_text(completions[0].text)
elif common_suffix:
b.insert_text(common_suffix)
elif completions:
_display_completions_like_readline(event.cli, completions)
|
Key binding handler for readline-style tab completion.
This is meant to be as similar as possible to the way how readline displays
completions.
Generate the completions immediately (blocking) and display them above the
prompt in columns.
Usage::
# Call this handler when 'Tab' has been pressed.
registry.add_binding(Keys.ControlI)(display_completions_like_readline)
|
def close(self):
endpoint = self.endpoint.replace("/api/v1/spans", "")
logger.debug("Zipkin trace may be located at this URL {}/traces/{}".format(endpoint, self.trace_id))
|
End the report.
|
def _trim_dictionary_parameters(self, dict_param):
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
params = self._copy_param(dict_param)
else:
params = {}
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params
|
Return a dict that only has matching entries in the msgid.
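For illustration, the key-extraction regex used above can be exercised on its own; the msgid
below is made up:
import re
msgid = "user %(name)s has %(count)d items"
keys = re.findall(r'(?:[^%]|^)?%\((\w*)\)[a-z]', msgid)
# -> ['name', 'count']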
|
def resumption_token(parent, pagination, **kwargs):
if pagination.page == 1 and not pagination.has_next:
return
token = serialize(pagination, **kwargs)
e_resumptionToken = SubElement(parent, etree.QName(NS_OAIPMH,
'resumptionToken'))
if pagination.total:
expiration_date = datetime.utcnow() + timedelta(
seconds=current_app.config[
'OAISERVER_RESUMPTION_TOKEN_EXPIRE_TIME'
]
)
e_resumptionToken.set('expirationDate', datetime_to_datestamp(
expiration_date
))
e_resumptionToken.set('cursor', str(
(pagination.page - 1) * pagination.per_page
))
e_resumptionToken.set('completeListSize', str(pagination.total))
if token:
e_resumptionToken.text = token
|
Attach resumption token element to a parent.
|
def rulefiles(self, acc=None):
rulesdir = self.rulesdir(acc)
rules = [os.path.join(rulesdir, x) for x in self.get('rules', acc, [])]
if acc is not None:
rules += self.rulefiles(acc=None)
return rules
|
Return a list of rulefiles for the given account.
Returns an empty list if none specified.
|
def get_parler_languages_from_django_cms(cms_languages=None):
valid_keys = ['code', 'fallbacks', 'hide_untranslated',
'redirect_on_fallback']
if cms_languages:
if sys.version_info < (3, 0, 0):
int_types = (int, long)
else:
int_types = int
parler_languages = copy.deepcopy(cms_languages)
for site_id, site_config in cms_languages.items():
if site_id and (
not isinstance(site_id, int_types) and
site_id != 'default'
):
del parler_languages[site_id]
continue
if site_id == 'default':
for key, value in site_config.items():
if key not in valid_keys:
del parler_languages['default'][key]
else:
for i, lang_config in enumerate(site_config):
for key, value in lang_config.items():
if key not in valid_keys:
del parler_languages[site_id][i][key]
return parler_languages
return None
|
Converts django CMS' setting CMS_LANGUAGES into PARLER_LANGUAGES. Since
CMS_LANGUAGES is a strict superset of PARLER_LANGUAGES, we do a bit of
cleansing to remove irrelevant items.
|
def mapping_of(cls):
def mapper(data):
if not isinstance(data, Mapping):
raise TypeError(
"data must be a mapping, not %s"
% type(data).__name__)
return {
key: cls(value)
for key, value in data.items()
}
return mapper
|
Expects a mapping from some key to data for `cls` instances.
|
def usufyToGmlExport(d, fPath):
try:
oldData=nx.read_gml(fPath)
except UnicodeDecodeError as e:
print("UnicodeDecodeError:\t" + str(e))
print("Something went wrong when reading the .gml file relating to the decoding of UNICODE.")
import time as time
fPath+="_" +str(time.time())
print("To avoid losing data, the output file will be renamed to use the timestamp as:\n" + fPath + "_" + str(time.time()))
print()
oldData = nx.Graph()
except Exception as e:
oldData = nx.Graph()
newGraph = _generateGraphData(d, oldData)
nx.write_gml(newGraph,fPath)
|
Workaround to export data to a .gml file.
Args:
-----
d: Data to export.
fPath: File path for the output file.
|