| code (string, 51-2.38k characters) | docstring (string, 4-15.2k characters) |
|---|---|
def write(s, path, encoding="utf-8"):
    is_gzip = is_gzip_file(path)
    with open(path, "wb") as f:
        if is_gzip:
            f.write(zlib.compress(s.encode(encoding)))
        else:
            f.write(s.encode(encoding))
|
Write string to text file.
|
def make_timestamp_columns():
    return (
        Column('created_at', DateTime, default=func.utcnow(), nullable=False),
        Column('updated_at', DateTime, default=func.utcnow(), onupdate=func.utcnow(), nullable=False),
    )
|
Return two columns, created_at and updated_at, with UTC timestamp defaults; updated_at is also refreshed on every update.
|
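A minimal usage sketch for the column factory above, assuming SQLAlchemy is installed and that ``make_timestamp_columns`` is importable from the same module; the ``users`` table is hypothetical. The returned tuple is simply unpacked into a ``Table`` definition.

```python
from sqlalchemy import Table, MetaData, Column, Integer, String

metadata = MetaData()

# Hypothetical table; the created_at/updated_at columns come from the helper.
users = Table(
    'users', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(64)),
    *make_timestamp_columns(),
)
```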
def description(filename):
    with open(filename) as fp:
        for lineno, line in enumerate(fp):
            if lineno < 3:
                continue
            line = line.strip()
            if len(line) > 0:
                return line
|
Provide a short description: the first non-empty line after the first three lines of the file.
|
def get_rows(self):
    table = self.soup.find_all('tr')[1:-3]
    return [row for row in table if row.contents[3].string]
|
Get the rows from a broadcast ratings chart
|
def __process_gprest_response(self, r=None, restType='GET'):
    if r is None:
        logging.info('No response for REST ' + restType + ' request')
        return None
    httpStatus = r.status_code
    logging.info('HTTP status code: %s', httpStatus)
    if httpStatus == requests.codes.ok or \
            httpStatus == requests.codes.created:
        jsonR = r.json()
        if jsonR:
            statusStr = 'REST response status: %s' % \
                jsonR.get(self.__RESPONSE_STATUS_KEY)
            msgStr = 'REST response message: %s' % \
                jsonR.get(self.__RESPONSE_MESSAGE_KEY)
            logging.info(statusStr)
            logging.info(msgStr)
            return jsonR
        else:
            logging.warning('Unable to parse JSON body.')
            logging.warning(r.text)
            return None
    logging.warning('Invalid HTTP status code.')
    logging.warning(r.text)
    return r.json()
|
Return the processed JSON response for REST calls, or None if there was no response or the body could not be parsed.
|
def Header(self):
    if not self._header:
        self._header = Header(self.PrevHash, self.MerkleRoot, self.Timestamp,
                              self.Index, self.ConsensusData, self.NextConsensus, self.Script)
    return self._header
|
Get the block header.
Returns:
neo.Core.Header:
|
def as_json(self,
            entity_url,
            context=None):
    try:
        urllib.request.urlopen(entity_url)
    except urllib.error.HTTPError:
        raise ValueError("Cannot open {}".format(entity_url))
    entity_graph = self.read(entity_url)
    entity_json = json.loads(
        entity_graph.serialize(
            format='json-ld',
            context=context).decode())
    return json.dumps(entity_json)
|
Method takes an entity URI and attempts to return the Fedora Object
as JSON-LD.
Args:
entity_url(str): Fedora Commons URL of Entity
context(None): Returns JSON-LD with Context, default is None
Returns:
str: JSON-LD of Fedora Object
|
def get_all_kernels(self, kernel_ids=None, owners=None):
params = {}
if kernel_ids:
self.build_list_params(params, kernel_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
filter = {'image-type' : 'kernel'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params,
[('item', Image)], verb='POST')
|
Retrieve all the EC2 kernels available on your account.
Constructs a filter to allow the processing to happen server side.
:type kernel_ids: list
:param kernel_ids: A list of strings with the image IDs wanted
:type owners: list
:param owners: A list of owner IDs
:rtype: list
:return: A list of :class:`boto.ec2.image.Image`
|
def split_path(path):
path = path.split(0, 1)[0]
values = path.dimension_values(0)
splits = np.concatenate([[0], np.where(np.isnan(values))[0]+1, [None]])
subpaths = []
data = PandasInterface.as_dframe(path) if pd else path.array()
for i in range(len(splits)-1):
end = splits[i+1]
slc = slice(splits[i], None if end is None else end-1)
subpath = data.iloc[slc] if pd else data[slc]
if len(subpath):
subpaths.append(subpath)
return subpaths
|
Split a Path type containing a single NaN separated path into
multiple subpaths.
|
def get_token(self):
    payload = {'grant_type': 'client_credentials', 'client_id': self.client_id, 'client_secret': self.client_secret}
    r = requests.post(OAUTH_ENDPOINT, data=json.dumps(payload), headers={'content-type': 'application/json'})
    response = r.json()
    if r.status_code != 200 and ERROR_KEY not in response:
        raise GfycatClientError('Error fetching the OAUTH URL', r.status_code)
    elif ERROR_KEY in response:
        raise GfycatClientError(response[ERROR_KEY], r.status_code)
    self.token_type = response['token_type']
    self.access_token = response['access_token']
    self.expires_in = response['expires_in']
    self.expires_at = time.time() + self.expires_in - 5
    self.headers = {'content-type': 'application/json', 'Authorization': self.token_type + ' ' + self.access_token}
|
Gets the authorization token
|
def cli(env, identifier):
mgr = SoftLayer.LoadBalancerManager(env.client)
_, loadbal_id = loadbal.parse_id(identifier)
if not (env.skip_confirmations or
formatting.confirm("This action will cancel a load balancer. "
"Continue?")):
raise exceptions.CLIAbort('Aborted.')
mgr.cancel_lb(loadbal_id)
env.fout('Load Balancer with id %s is being cancelled!' % identifier)
|
Cancel an existing load balancer.
|
def savepoint(self):
if self._last_image:
self._savepoints.append(self._last_image)
self._last_image = None
|
Copies the last displayed image.
|
def run(self):
if self.stdout:
sys.stdout.write("extracted json data:\n" + json.dumps(
self.metadata, default=to_str) + "\n")
else:
extract_dist.class_metadata = self.metadata
|
Sends extracted metadata in json format to stdout if stdout
option is specified, assigns metadata dictionary to class_metadata
variable otherwise.
|
def make_ggnvp(f, g=lambda x: 1./2*np.sum(x**2, axis=-1), f_argnum=0):
    @unary_to_nary
    def _make_ggnvp(f, x):
        f_vjp, f_x = _make_vjp(f, x)
        g_hvp, grad_g_x = _make_vjp(grad(g), f_x)
        f_jvp, _ = _make_vjp(f_vjp, vspace(grad_g_x).zeros())
        def ggnvp(v): return f_vjp(g_hvp(f_jvp(v)))
        return ggnvp
    return _make_ggnvp(f, f_argnum)
|
Builds a function for evaluating generalized-Gauss-Newton-vector products
at a point. Slightly more expensive than mixed-mode.
|
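The composition ``f_vjp(g_hvp(f_jvp(v)))`` computes J^T H_g J v, where J is the Jacobian of f at x and H_g is the Hessian of g at f(x); with the default g = 1/2 * ||.||^2 this reduces to J^T J v. A small sketch under the assumption that this helper is autograd's ``make_ggnvp`` and is importable as such; the toy residual function and data are made up.

```python
import autograd.numpy as np
from autograd import make_ggnvp  # assumption: the helper above is exported here

A = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
b = np.array([1.0, 0.0, -1.0])

def residuals(w):
    # Toy residual function r(w) = A @ w - b, so its Jacobian is A.
    return np.dot(A, w) - b

w0 = np.zeros(2)
ggnvp = make_ggnvp(residuals)(w0)   # GGN-vector product at w0
v = np.array([1.0, -1.0])

# With the default g, the GGN matrix is J^T J = A^T A, so these should match.
print(ggnvp(v))
print(np.dot(A.T, np.dot(A, v)))
```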
def close(self):
if self._S is not None:
self._S.close()
self._S = None
self._Q.put_nowait(None)
|
Begin closing subscription.
|
def group(*blueprints, url_prefix=""):
    def chain(nested):
        for i in nested:
            if isinstance(i, (list, tuple)):
                yield from chain(i)
            elif isinstance(i, BlueprintGroup):
                yield from i.blueprints
            else:
                yield i

    bps = BlueprintGroup(url_prefix=url_prefix)
    for bp in chain(blueprints):
        if bp.url_prefix is None:
            bp.url_prefix = ""
        bp.url_prefix = url_prefix + bp.url_prefix
        bps.append(bp)
    return bps
|
Create a list of blueprints, optionally grouping them under a
general URL prefix.
:param blueprints: blueprints to be registered as a group
:param url_prefix: URL route to be prepended to all sub-prefixes
|
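A brief usage sketch, assuming this is the helper behind Sanic's ``Blueprint.group`` and that Sanic is installed; ``api_v1`` and ``api_v2`` are hypothetical blueprints. Each blueprint keeps its own prefix, and the group prefix is prepended to it.

```python
from sanic import Sanic, Blueprint

api_v1 = Blueprint('v1', url_prefix='/v1')
api_v2 = Blueprint('v2', url_prefix='/v2')

@api_v1.route('/users')
async def users_v1(request):
    ...

# Group both blueprints under a common prefix; the route above is now
# served at /api/v1/users.
api = group(api_v1, api_v2, url_prefix='/api')

app = Sanic('demo')
app.blueprint(api)  # a blueprint group registers like a list of blueprints
```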
def install_virtualenv(parser_args):
python_version = '.'.join(str(v) for v in sys.version_info[:2])
sys.stdout.write('Installing Python {0} virtualenv into {1} \n'.format(python_version, VE_ROOT))
if sys.version_info < (3, 3):
install_virtualenv_p2(VE_ROOT, python_version)
else:
install_virtualenv_p3(VE_ROOT, python_version)
|
Installs virtual environment
|
def read_file(path):
with open(must_exist(path)) as infile:
r = infile.read()
return r
|
Read file to string.
Arguments:
path (str): Source.
|
def get_win32_short_path_name(long_name):
import ctypes
from ctypes import wintypes
_GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW
_GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]
_GetShortPathNameW.restype = wintypes.DWORD
output_buf_size = 0
while True:
output_buf = ctypes.create_unicode_buffer(output_buf_size)
needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)
if output_buf_size >= needed:
short_name = output_buf.value
break
else:
output_buf_size = needed
return short_name
|
Gets the short path name of a given long path.
References:
http://stackoverflow.com/a/23598461/200291
http://stackoverflow.com/questions/23598289/get-win-short-fname-python
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut # NOQA
>>> # build test data
>>> #long_name = unicode(normpath(ut.get_resource_dir()))
>>> long_name = unicode(r'C:/Program Files (x86)')
>>> #long_name = unicode(r'C:/Python27')
#unicode(normpath(ut.get_resource_dir()))
>>> # execute function
>>> result = get_win32_short_path_name(long_name)
>>> # verify results
>>> print(result)
C:/PROGRA~2
|
def to_vobject(self, project=None, uid=None):
self._update()
vtodos = iCalendar()
if uid:
uid = uid.split('@')[0]
if not project:
for p in self._tasks:
if uid in self._tasks[p]:
project = p
break
self._gen_vtodo(self._tasks[basename(project)][uid], vtodos.add('vtodo'))
elif project:
for task in self._tasks[basename(project)].values():
self._gen_vtodo(task, vtodos.add('vtodo'))
else:
for project in self._tasks:
for task in self._tasks[project].values():
self._gen_vtodo(task, vtodos.add('vtodo'))
return vtodos
|
Return a vObject of Taskwarrior tasks.
If a project and UID are specified, the vObject only contains that task.
If only a project is specified, the vObject contains all tasks in the project.
Otherwise the vObject contains all tasks of all projects associated with the IcsTask object.
project -- the Taskwarrior project
uid -- the UID of the task
|
def update_scalar_bar_range(self, clim, name=None):
    if isinstance(clim, (float, int)):
        clim = [-clim, clim]
    if len(clim) != 2:
        raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
    if name is None:
        if not hasattr(self, 'mapper'):
            raise RuntimeError('This plotter does not have an active mapper.')
        return self.mapper.SetScalarRange(*clim)

    def update_mapper(mapper):
        return mapper.SetScalarRange(*clim)

    try:
        for m in self._scalar_bar_mappers[name]:
            update_mapper(m)
    except KeyError:
        # Include the offending name in the error message.
        raise KeyError('Name ({}) not valid/not found in this plotter.'.format(name))
    return
|
Update the value range of the active or named scalar bar.
Parameters
----------
clim : 2 item list
The new range of the scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
|
def memory_usage(self, deep=False):
if hasattr(self.array, 'memory_usage'):
return self.array.memory_usage(deep=deep)
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.array)
return v
|
Memory usage of the values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
|
def _higher_function_scope(node):
current = node
while current.parent and not isinstance(current.parent, nodes.FunctionDef):
current = current.parent
if current and current.parent:
return current.parent
return None
|
Search for the first function which encloses the given
scope. This can be used for looking up in that function's
scope, in case looking up in a lower scope for a particular
name fails.
:param node: A scope node.
:returns:
``None``, if no parent function scope was found,
otherwise an instance of :class:`astroid.scoped_nodes.Function`,
which encloses the given node.
|
def send(self, uid, event, payload=None):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if uid in self.controllers.keys():
addr = self.controllers[uid][0]
port = self.controllers[uid][1]
if event == E_MESSAGE:
return sock.sendto('/message/{}'.format(payload).encode('utf-8'), (addr, port))
elif event == E_RUMBLE:
return sock.sendto('/rumble/{}'.format(payload).encode('utf-8'), (addr, port))
else:
pass
else:
pass
return False
|
Send an event to a connected controller. Use pymlgame event type and correct payload.
To send a message to the controller use pymlgame.E_MESSAGE event and a string as payload.
:param uid: Unique id of the controller
:param event: Event type
:param payload: Payload of the event
:type uid: str
:type event: Event
:type payload: str
:return: Number of bytes sent, or False
:rtype: int
|
def clean_params(params, drop_nones=True, recursive=True):
    cleaned = {}
    for key, value in six.iteritems(params):
        if drop_nones and value is None:
            continue
        if recursive and isinstance(value, dict):
            value = clean_params(value, drop_nones, recursive)
        cleaned[key] = value
    return cleaned
|
Clean up a dict of API parameters to be sent to the Coinbase API.
Some endpoints require boolean options to be represented as integers. By
default, will remove all keys whose value is None, so that they will not be
sent to the API endpoint at all.
|
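A small sketch of the behaviour described above, assuming ``clean_params`` from the snippet is importable; the parameter names are made up.

```python
params = {
    'account_id': 'abc123',
    'limit': None,                                          # dropped
    'options': {'starting_after': None, 'order': 'desc'},   # cleaned recursively
}

cleaned = clean_params(params)
print(cleaned)
# {'account_id': 'abc123', 'options': {'order': 'desc'}}
```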
def to_dade_matrix(M, annotations="", filename=None):
    n, m = M.shape
    A = np.zeros((n + 1, m + 1))
    A[1:, 1:] = M
    if not annotations:
        # Default to empty labels, one per row/column of the padded matrix.
        annotations = np.array(["" for _ in range(n + 1)], dtype=str)
    A[0, :] = annotations
    A[:, 0] = annotations.T
    if filename:
        try:
            np.savetxt(filename, A, fmt='%i')
            print("I saved input matrix in dade format as " + str(filename))
        except ValueError as e:
            print("I couldn't save input matrix.")
            print(str(e))
        finally:
            return A
    return A
|
Returns a Dade matrix from input numpy matrix. Any annotations are added
as header. If filename is provided and valid, said matrix is also saved
as text.
|
def _ParseVValueString(
self, parser_mediator, data, user_information_descriptor):
data_start_offset = (
user_information_descriptor.offset + self._V_VALUE_STRINGS_OFFSET)
data_end_offset = data_start_offset + user_information_descriptor.size
descriptor_data = data[data_start_offset:data_end_offset]
try:
username = descriptor_data.decode('utf-16-le')
except (UnicodeDecodeError, UnicodeEncodeError) as exception:
username = descriptor_data.decode('utf-16-le', errors='replace')
parser_mediator.ProduceExtractionWarning((
'unable to decode V value string with error: {0!s}. Characters '
'that cannot be decoded will be replaced with "?" or '
'"\\ufffd".').format(exception))
return username
|
Parses a V value string.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
data (bytes): Windows Registry V value data.
user_information_descriptor (user_information_descriptor): V value
user information descriptor.
Returns:
str: string value stored in the Windows Registry V value data.
|
def get_raw_default_config_and_read_file_list():
global _CONFIG, _READ_DEFAULT_FILES
if _CONFIG is not None:
return _CONFIG, _READ_DEFAULT_FILES
with _CONFIG_LOCK:
if _CONFIG is not None:
return _CONFIG, _READ_DEFAULT_FILES
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import ConfigParser as SafeConfigParser
cfg = SafeConfigParser()
read_files = cfg.read(get_default_config_filename())
_CONFIG, _READ_DEFAULT_FILES = cfg, read_files
return _CONFIG, _READ_DEFAULT_FILES
|
Returns a ConfigParser object and a list of filenames that were parsed to initialize it
|
def datetime(self, field=None, val=None):
if val is None:
def source():
tzinfo = get_default_timezone() if settings.USE_TZ else None
return datetime.fromtimestamp(randrange(1, 2100000000),
tzinfo)
else:
def source():
tzinfo = get_default_timezone() if settings.USE_TZ else None
return datetime.fromtimestamp(int(val.strftime("%s")) +
randrange(-365*24*3600*2, 365*24*3600*2),
tzinfo)
return self.get_allowed_value(source, field)
|
Returns a random datetime. If 'val' is passed, a datetime within two
years of that date will be returned.
|
def _all_reachable_tables(t):
for k, v in t.items():
for tname in _all_reachable_tables(v):
yield tname
yield k
|
A generator that provides all the names of tables that can be
reached via merges starting at the given target table.
|
def get_chembl_id(nlm_mesh):
mesh_id = get_mesh_id(nlm_mesh)
pcid = get_pcid(mesh_id)
url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + \
'cid/%s/synonyms/JSON' % pcid
r = requests.get(url_mesh2pcid)
res = r.json()
synonyms = res['InformationList']['Information'][0]['Synonym']
chembl_id = [syn for syn in synonyms
if 'CHEMBL' in syn and 'SCHEMBL' not in syn][0]
return chembl_id
|
Get ChEMBL ID from NLM MESH
Parameters
----------
nlm_mesh : str
Returns
-------
chembl_id : str
|
def _interpolated_template(self, templateid):
phase, y = self._get_template_by_id(templateid)
assert phase.min() >= 0
assert phase.max() <= 1
phase = np.concatenate([phase[-5:] - 1, phase, phase[:5] + 1])
y = np.concatenate([y[-5:], y, y[:5]])
return UnivariateSpline(phase, y, s=0, k=5)
|
Return an interpolator for the given template
|
def generate(regex, Ns):
    "Return the strings matching regex whose length is in Ns."
    return sorted(regex_parse(regex)[0](Ns),
                  key=lambda s: (len(s), s))
|
Return the strings matching regex whose length is in Ns.
|
def create_intent(self,
workspace_id,
intent,
description=None,
examples=None,
**kwargs):
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if examples is not None:
examples = [self._convert_model(x, Example) for x in examples]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1', 'create_intent')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'intent': intent,
'description': description,
'examples': examples
}
url = '/v1/workspaces/{0}/intents'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response
|
Create intent.
Create a new intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str description: The description of the intent. This string cannot contain
carriage return, newline, or tab characters, and it must be no longer than 128
characters.
:param list[Example] examples: An array of user input examples for the intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
|
def get_summary(result):
summary = {
"success": result.wasSuccessful(),
"stat": {
'total': result.testsRun,
'failures': len(result.failures),
'errors': len(result.errors),
'skipped': len(result.skipped),
'expectedFailures': len(result.expectedFailures),
'unexpectedSuccesses': len(result.unexpectedSuccesses)
}
}
summary["stat"]["successes"] = summary["stat"]["total"] \
- summary["stat"]["failures"] \
- summary["stat"]["errors"] \
- summary["stat"]["skipped"] \
- summary["stat"]["expectedFailures"] \
- summary["stat"]["unexpectedSuccesses"]
summary["time"] = {
'start_at': result.start_at,
'duration': result.duration
}
summary["records"] = result.records
return summary
|
get summary from test result
Args:
result (instance): HtmlTestResult() instance
Returns:
dict: summary extracted from result.
{
"success": True,
"stat": {},
"time": {},
"records": []
}
|
def untrace_function(module, function):
if not is_traced(function):
return False
name = get_object_name(function)
setattr(module, name, untracer(function))
return True
|
Untraces given module function.
:param module: Module of the function.
:type module: object
:param function: Function to untrace.
:type function: object
:return: Definition success.
:rtype: bool
|
def bz2_pack(source):
import bz2, base64
out = ""
first_line = source.split('\n')[0]
if analyze.shebang.match(first_line):
if py3:
if first_line.rstrip().endswith('python'):
first_line = first_line.rstrip()
first_line += '3'
out = first_line + '\n'
compressed_source = bz2.compress(source.encode('utf-8'))
out += 'import bz2, base64\n'
out += "exec(bz2.decompress(base64.b64decode('"
out += base64.b64encode(compressed_source).decode('utf-8')
out += "')))\n"
return out
|
Returns 'source' as a bzip2-compressed, self-extracting python script.
.. note::
This method uses up more space than the zip_pack method but it has the
advantage that the resulting .py file can still be imported into a
Python program.
|
async def emit(self, record: LogRecord):
if self.writer is None:
self.writer = await self._init_writer()
try:
msg = self.format(record) + self.terminator
self.writer.write(msg.encode())
await self.writer.drain()
except Exception:
await self.handleError(record)
|
Actually log the specified logging record to the stream.
|
def generate_phase_1(dim = 40):
phase_1 = numpy.random.normal(0, 1, dim)
for i in range(dim - 4, dim):
phase_1[i] = 1.0
return phase_1
|
The first step in creating datapoints in the Poirazi & Mel model.
This returns a vector of dimension dim, with the last four values set to
1 and the rest drawn from a normal distribution.
|
def bounds(ctx, tile):
click.echo(
'%s %s %s %s' % TilePyramid(
ctx.obj['grid'],
tile_size=ctx.obj['tile_size'],
metatiling=ctx.obj['metatiling']
).tile(*tile).bounds(pixelbuffer=ctx.obj['pixelbuffer'])
)
|
Print Tile bounds.
|
def clip_adaptor(read, adaptor):
missmatches = 2
adaptor = adaptor.truncate(10)
read.clip_end(adaptor, len(adaptor) - missmatches)
|
Clip an adaptor sequence from this sequence. We assume it's in the 3'
end. This is basically a convenience wrapper for clipThreePrime. It
requires 8 out of 10 of the first bases in the adaptor sequence to match
for clipping to occur.
:param adaptor: sequence to look for. We only use the first 10 bases;
must be a full Sequence object, not just a string.
|
def _check_column_lengths(self):
column_lengths_dict = {
name: len(xs)
for (name, xs)
in self.columns_dict.items()
}
unique_column_lengths = set(column_lengths_dict.values())
if len(unique_column_lengths) != 1:
raise ValueError(
"Mismatch between lengths of columns: %s" % (column_lengths_dict,))
|
Make sure columns are of the same length or else DataFrame construction
will fail.
|
def get_matching_property_names(self, regex):
log = logging.getLogger(self.cls_logger + '.get_matching_property_names')
prop_list_matched = []
if not isinstance(regex, basestring):
log.warn('regex arg is not a string, found type: {t}'.format(t=regex.__class__.__name__))
return prop_list_matched
log.debug('Finding properties matching regex: {r}'.format(r=regex))
for prop_name in self.properties.keys():
match = re.search(regex, prop_name)
if match:
prop_list_matched.append(prop_name)
return prop_list_matched
|
Returns a list of property names matching the provided
regular expression
:param regex: Regular expression to search on
:return: (list) of property names matching the regex
|
def export(self, Height=None, options=None, outputFile=None, Resolution=None,\
Units=None, Width=None, Zoom=None, view="current", verbose=False):
PARAMS=set_param(["Height","options","outputFile","Resolution",\
"Units","Width","Zoom","view"],\
[Height,options,outputFile,Resolution,Units,Width,Zoom,view ])
response=api(url=self.__url+"/export", PARAMS=PARAMS, method="POST", verbose=verbose)
return response
|
Exports the current view to a graphics file and returns the path to the
saved file. PNG and JPEG formats have options for scaling, while other
formats only have the option 'exportTextAsFont'. For the PDF format,
exporting text as font does not work for two-byte characters such as
Chinese or Japanese. To avoid corrupted texts in the exported PDF,
please set 'exportTextAsFont' to false when exporting networks that include
such non-English characters.
:param Height (string, optional): The height of the exported image. Valid
only for bitmap formats, such as PNG and JPEG.
:param options (string, optional): The format of the output file. =
['JPEG (*.jpeg, *.jpg)', 'PDF (*.pdf)', 'PNG (*.png)', 'PostScript (*.ps)',
'SVG (*.svg)']
:param OutputFile (string, optional): The path name of the file where
the view must be saved to. By default, the view's title is used as
the file name.
:param Resolution (string, optional): The resolution of the exported
image, in DPI. Valid only for bitmap formats, when the selected width
and height 'units' is inches. The possible values are: 72 (default),
100, 150, 300, 600. = ['72', '100', '150', '300', '600']
:param Units (string, optional): The units for the 'width' and 'height'
values. Valid only for bitmap formats, such as PNG and JPEG. The
possible values are: pixels (default), inches. = ['pixels', 'inches']
:param Width (string, optional): The width of the exported image. Valid
only for bitmap formats, such as PNG and JPEG.
:param Zoom (string, optional): The zoom value to proportionally scale
the image. The default value is 100.0. Valid only for bitmap formats,
such as PNG and JPEG
:param verbose: print more
:returns: path to the saved file
|
def params_to_dict(params, dct):
for param, val in params.items():
if val is None:
continue
dct[param] = val
return dct
|
Updates the 'dct' dictionary with the 'params' dictionary, filtering out
all those whose param value is None.
|
def taxonomy(value):
    try:
        value.encode('ascii')
    except UnicodeEncodeError:
        raise ValueError('tag %r is not ASCII' % value)
    if re.search(r'\s', value):
        raise ValueError('The taxonomy %r contains whitespace chars' % value)
    return value
|
Any ASCII character goes into a taxonomy, except spaces.
|
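A short sketch of the validator above; the tag values are made up.

```python
print(taxonomy('structural'))        # accepted and returned unchanged
print(taxonomy('ground_shaking-1'))  # underscores, hyphens, digits are fine

try:
    taxonomy('ground shaking')       # whitespace is rejected
except ValueError as exc:
    print(exc)
```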
def install(name=None, refresh=False, version=None, pkgs=None, **kwargs):
if refresh:
refresh_db()
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name,
pkgs,
**kwargs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
if not pkg_params:
return {}
if pkgs is None and version and len(pkg_params) == 1:
pkg_params = {name: version}
targets = []
for param, pkgver in six.iteritems(pkg_params):
if pkgver is None:
targets.append(param)
else:
targets.append('{0}-{1}'.format(param, pkgver))
cmd = '/opt/csw/bin/pkgutil -yu {0}'.format(' '.join(targets))
old = list_pkgs()
__salt__['cmd.run_all'](cmd)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new)
|
Install packages using the pkgutil tool.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package_name>
salt '*' pkg.install SMClgcc346
Multiple Package Installation Options:
pkgs
A list of packages to install from OpenCSW. Must be passed as a python
list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3"}]'
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
|
def get_target_list(self, scan_id):
target_list = []
for target, _, _ in self.scans_table[scan_id]['targets']:
target_list.append(target)
return target_list
|
Get a scan's target list.
|
def load_pickle(file_path):
pkl_file = open(file_path, 'rb')
data = pickle.load(pkl_file)
pkl_file.close()
return data
|
Unpickle some data from a given path.
Input: - file_path: Target file path.
Output: - data: The python object that was serialized and stored in disk.
|
def fatalities_range(number):
    range_format = '{min_range} - {max_range}'
    more_than_format = '> {min_range}'
    ranges = [
        [0, 100],
        [100, 1000],
        [1000, 10000],
        [10000, 100000],
        [100000, float('inf')]
    ]
    for r in ranges:
        min_range = r[0]
        max_range = r[1]
        if max_range == float('inf'):
            return more_than_format.format(
                min_range=add_separators(min_range))
        elif min_range <= number <= max_range:
            return range_format.format(
                min_range=add_separators(min_range),
                max_range=add_separators(max_range))
|
A helper to return fatalities as a range of number.
See https://github.com/inasafe/inasafe/issues/3666#issuecomment-283565297
:param number: The exact number. Will be converted as a range.
:type number: int, float
:return: The range of the number.
:rtype: str
|
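A quick sketch of the helper above; ``add_separators`` is assumed to insert thousands separators, so the exact output strings depend on it.

```python
print(fatalities_range(42))        # e.g. '0 - 100'
print(fatalities_range(2500))      # e.g. '1,000 - 10,000'
print(fatalities_range(250000))    # e.g. '> 100,000'
```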
def get_chunked_content(self, chunksize=4096):
r = self.obj.api.getDatastreamDissemination(self.obj.pid, self.id,
stream=True, asOfDateTime=self.as_of_date)
for chunk in r.iter_content(chunksize):
yield chunk
|
Generator that returns the datastream content in chunks, so
larger datastreams can be used without reading the entire
contents into memory.
|
def _add_volume(line):
section = _analyse_status_type(line)
fields = line.strip().split()
volume = {}
for field in fields:
volume[field.split(':')[0]] = field.split(':')[1]
if section == 'LOCALDISK':
resource['local volumes'].append(volume)
else:
lastpnodevolumes.append(volume)
|
Analyse a volume line from the output of ``drbdadm status``.
|
def run():
args = parse_args()
if args.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(
level=log_level,
format='%(asctime)s %(levelname)s %(name)s: %(message)s')
if not args.verbose:
req_logger = logging.getLogger('requests')
req_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
logger.info('refresh-lsst-bib version {}'.format(__version__))
error_count = process_bib_files(args.dir)
sys.exit(error_count)
|
Command line entrypoint for the ``refresh-lsst-bib`` program.
|
def Output(self):
self.Open()
self.Header()
self.Body()
self.Footer()
|
Output all sections of the page.
|
def disable_requiretty_on_sudoers(log=False):
if log:
bookshelf2.logging_helpers.log_green(
'disabling requiretty on sudo calls')
comment_line('/etc/sudoers',
'^Defaults.*requiretty', use_sudo=True)
return True
|
allow sudo calls through ssh without a tty
|
def list_all(self, before_id=None, since_id=None, **kwargs):
return self.list(before_id=before_id, since_id=since_id, **kwargs).autopage()
|
Return all direct messages.
The messages come in reversed order (newest first). Note you can only
provide _one_ of ``before_id``, ``since_id``.
:param str before_id: message ID for paging backwards
:param str since_id: message ID for most recent messages since
:return: direct messages
:rtype: generator
|
def fullpath(relpath):
if (type(relpath) is object or type(relpath) is file):
relpath = relpath.name
return os.path.abspath(os.path.expanduser(relpath))
|
Convert a relative path (or an open file object) to an absolute path.
|
def space_clone(args):
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("Error: destination project and namespace must differ from"
" cloned workspace")
return 1
r = fapi.clone_workspace(args.project, args.workspace, args.to_project,
args.to_workspace)
fapi._check_response_code(r, 201)
if fcconfig.verbosity:
msg = "{}/{} successfully cloned to {}/{}".format(
args.project, args.workspace,
args.to_project, args.to_workspace)
print(msg)
return 0
|
Replicate a workspace
|
def build_response(headers: Headers, key: str) -> None:
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
headers["Sec-WebSocket-Accept"] = accept(key)
|
Build a handshake response to send to the client.
``key`` comes from :func:`check_request`.
|
def return_hdr(ts, package):
try:
fdno = os.open(package, os.O_RDONLY)
except OSError:
hdr = None
return hdr
ts.setVSFlags(~(rpm.RPMVSF_NOMD5 | rpm.RPMVSF_NEEDPAYLOAD))
try:
hdr = ts.hdrFromFdno(fdno)
except rpm.error:
hdr = None
raise rpm.error
if type(hdr) != rpm.hdr:
hdr = None
ts.setVSFlags(0)
os.close(fdno)
return hdr
|
Hand back the hdr - duh - if the pkg is foobar handback None
Shamelessly stolen from Seth Vidal
http://yum.baseurl.org/download/misc/checksig.py
|
def create(state, host, ctid, template=None):
current_containers = host.fact.openvz_containers
if ctid in current_containers:
raise OperationError(
'An OpenVZ container with CTID {0} already exists'.format(ctid),
)
args = ['{0}'.format(ctid)]
if template:
args.append('--ostemplate {0}'.format(template))
yield 'vzctl create {0}'.format(' '.join(args))
|
Create OpenVZ containers.
+ ctid: CTID of the container to create
|
def from_traceback(cls, tb):
while tb.tb_next:
tb = tb.tb_next
return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)
|
Construct a Bytecode from the given traceback
|
def filter(self, *args):
if len(args) == 1 and isinstance(args[0], Filter):
filter = args[0]
else:
filter = Filter(*args)
filter.object_getattr = self.object_getattr
self.filters.append(filter)
return self
|
Adds a Filter to this query.
Args:
see :py:class:`Filter <datastore.query.Filter>` constructor
Returns self for JS-like method chaining::
query.filter('age', '>', 18).filter('sex', '=', 'Female')
|
def add_directives(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
docnames=List[str],
):
for k, v in list(kb_app.config.resources.items()):
sphinx_app.add_directive(k, ResourceDirective)
|
For each resource type, register a new Sphinx directive
|
def local_title(self):
name = self.title.partition(" for ")[0]
exceptDate = getLocalDate(self.except_date, self.time_from, self.tz)
title = _("{exception} for {date}").format(exception=_(name),
date=dateFormat(exceptDate))
return title
|
Localised version of the human-readable title of the page.
|
def to_dict(self):
attributes = dict(self.attributes.items())
if self.style:
attributes.update({"style": dict(self.style.items())})
vdom_dict = {'tagName': self.tag_name, 'attributes': attributes}
if self.event_handlers:
event_handlers = dict(self.event_handlers.items())
for key, value in event_handlers.items():
value = create_event_handler(key, value)
event_handlers[key] = value
vdom_dict['eventHandlers'] = event_handlers
if self.key:
vdom_dict['key'] = self.key
vdom_dict['children'] = [c.to_dict() if isinstance(c, VDOM) else c for c in self.children]
return vdom_dict
|
Converts VDOM object to a dictionary that passes our schema
|
def get_output(self, job_id, outfn):
job_info = self.job_info(jobid=job_id)[0]
status = int(job_info["Status"])
if status != 5:
raise Exception("The status of job %d is %d (%s)"
%(job_id, status, self.status_codes[status]))
remotefn = job_info["OutputLoc"]
r = requests.get(remotefn)
code = r.status_code
if code != 200:
raise Exception("Getting file %s yielded status: %d"
%(remotefn, code))
try:
outfn.write(r.content)
except AttributeError:
f = open(outfn, "wb")
f.write(r.content)
f.close()
|
Download an output file given the id of the output request job.
## Arguments
* `job_id` (int): The id of the _output_ job.
* `outfn` (str): The file where the output should be stored.
May also be a file-like object with a 'write' method.
|
def facets_area(self):
area_faces = self.area_faces
areas = np.array([sum(area_faces[i])
for i in self.facets],
dtype=np.float64)
return areas
|
Return an array containing the area of each facet.
Returns
---------
area : (len(self.facets),) float
Total area of each facet (group of faces)
|
def by_user(config):
client = Client()
client.prepare_connection()
audit_api = API(client)
CLI.parse_membership('Groups by User', audit_api.by_user())
|
Display LDAP group membership sorted by user.
|
def clear_layout(layout: QLayout) -> None:
    if layout is not None:
        while layout.count():
            item = layout.takeAt(0)
            widget = item.widget()
            if widget is not None:
                widget.deleteLater()
            else:
                clear_layout(item.layout())
|
Clear the layout of all its components.
|
def get_builtin_date(date, date_format="%Y-%m-%dT%H:%M:%S", raise_exception=False):
    if isinstance(date, datetime.datetime):
        return date
    elif isinstance(date, xmlrpc_client.DateTime):
        return datetime.datetime.strptime(date.value, "%Y%m%dT%H:%M:%S")
    else:
        try:
            return datetime.datetime.strptime(date, date_format)
        except ValueError:
            if raise_exception:
                raise
            else:
                return None
|
Try to convert a date to a builtin instance of ``datetime.datetime``.
The input date can be a ``str``, a ``datetime.datetime``, an ``xmlrpc.client.DateTime`` or an ``xmlrpclib.DateTime``
instance. The returned object is a ``datetime.datetime``.
:param date: The date object to convert.
:param date_format: If the given date is a str, format is passed to strptime to parse it
:param raise_exception: If set to True, an exception will be raised if the input string cannot be parsed
:return: A valid ``datetime.datetime`` instance
|
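A short sketch of the conversion rules above, assuming ``get_builtin_date`` is importable.

```python
import datetime

now = datetime.datetime(2020, 1, 1, 12, 0, 0)
assert get_builtin_date(now) is now                    # datetimes pass through

parsed = get_builtin_date("2020-01-01T12:00:00")       # default format
print(parsed)

assert get_builtin_date("01/01/2020") is None          # unparseable -> None
# get_builtin_date("01/01/2020", raise_exception=True) # ... or raises ValueError
```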
def set_default(self, default):
    if default is not None and len(default) > 1 and default[0] == '"' and default[-1] == '"':
        default = default[1:-1]
    self.defaultValue = default
|
Set Definition default value.
:param default: default value; number, str or quoted str ("value")
|
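A small sketch of the quote-stripping behaviour, assuming a hypothetical ``Definition`` class that exposes ``set_default`` and ``defaultValue``.

```python
defn = Definition()           # hypothetical container object

defn.set_default('"42"')      # surrounding double quotes are stripped
assert defn.defaultValue == '42'

defn.set_default('3.14')      # unquoted values are stored as-is
assert defn.defaultValue == '3.14'
```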
def search(self, start_ts, end_ts):
for meta_collection_name in self._meta_collections():
meta_coll = self.meta_database[meta_collection_name]
for ts_ns_doc in meta_coll.find(
{"_ts": {"$lte": end_ts, "$gte": start_ts}}
):
yield ts_ns_doc
|
Called to query Mongo for documents in a time range.
|
def _match_to_array(m):
return [_cast_biopax_element(m.get(i)) for i in range(m.varSize())]
|
Returns an array consisting of the elements obtained from a pattern
search cast into their appropriate classes.
|
def combine_cache_keys(cls, cache_keys):
if len(cache_keys) == 1:
return cache_keys[0]
else:
combined_id = Target.maybe_readable_combine_ids(cache_key.id for cache_key in cache_keys)
combined_hash = hash_all(sorted(cache_key.hash for cache_key in cache_keys))
return cls(combined_id, combined_hash)
|
Returns a cache key for a list of target sets that already have cache keys.
This operation is 'idempotent' in the sense that if cache_keys contains a single key
then that key is returned.
Note that this operation is commutative but not associative. We use the term 'combine' rather
than 'merge' or 'union' to remind the user of this. Associativity is not a necessary property,
in practice.
|
def load_entry_point_system_roles(self, entry_point_group):
for ep in pkg_resources.iter_entry_points(group=entry_point_group):
self.register_system_role(ep.load())
|
Load system roles from an entry point group.
:param entry_point_group: The entrypoint for extensions.
|
def closing(image, radius=None, mask=None, footprint = None):
dilated_image = grey_dilation(image, radius, mask, footprint)
return grey_erosion(dilated_image, radius, mask, footprint)
|
Do a morphological closing
image - pixel image to operate on
radius - use a structuring element with the given radius. If no structuring
element, use an 8-connected structuring element.
mask - if present, only use unmasked pixels for operations
|
def extract_keywords_from_text(index_page, no_items=5):
index_page = MLStripper.strip_tags(index_page)
tokenized_index = TextBlob(index_page).lower()
def to_str(key):
if isinstance(key, unicode):
return key.encode("utf-8")
return key
present_keywords = [
KEYWORDS_LOWER[key]
for key in KEYWORDS_LOWER.keys()
if len(key) > 3 and key in tokenized_index
]
def to_source_string(key):
source = "Keyword analysis"
try:
return SourceString(key, source)
except UnicodeEncodeError:
return SourceString(key.encode("utf-8"), source)
multi_keywords = [
to_source_string(key)
for key in present_keywords
if tokenized_index.words.count(key) >= 1
]
multi_keywords = sorted(multi_keywords, key=lambda x: len(x), reverse=True)
if len(multi_keywords) > no_items:
return multi_keywords[:no_items]
return multi_keywords
|
Try to process the text of `index_page`, deduce the keywords, and then try
to match them against the Aleph dataset.
Function returns maximally `no_items` items, to prevent spamming the user.
Args:
index_page (str): Content of the page as UTF-8 string
no_items (int, default 5): Number of items to return.
Returns:
list: List of :class:`.SourceString` objects.
|
def project_point(cb, msg, attributes=('x', 'y')):
if skip(cb, msg, attributes): return msg
plot = get_cb_plot(cb)
x, y = msg.get('x', 0), msg.get('y', 0)
crs = plot.current_frame.crs
coordinates = crs.transform_points(plot.projection, np.array([x]), np.array([y]))
msg['x'], msg['y'] = coordinates[0, :2]
return {k: v for k, v in msg.items() if k in attributes}
|
Projects a single point supplied by a callback
|
def set_color_in_session(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = False
if 'Color' in intent['slots']:
favorite_color = intent['slots']['Color']['value']
session_attributes = create_favorite_color_attributes(favorite_color)
speech_output = "I now know your favorite color is " + \
favorite_color + \
". You can ask me your favorite color by saying, " \
"what's my favorite color?"
reprompt_text = "You can ask me your favorite color by saying, " \
"what's my favorite color?"
else:
speech_output = "I'm not sure what your favorite color is. " \
"Please try again."
reprompt_text = "I'm not sure what your favorite color is. " \
"You can tell me your favorite color by saying, " \
"my favorite color is red."
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
|
Sets the color in the session and prepares the speech to reply to the
user.
|
def flatten(cls, stats):
flat_children = {}
for _stats in spread_stats(stats):
key = (_stats.name, _stats.filename, _stats.lineno, _stats.module)
try:
flat_stats = flat_children[key]
except KeyError:
flat_stats = flat_children[key] = cls(*key)
flat_stats.own_hits += _stats.own_hits
flat_stats.deep_hits += _stats.deep_hits
flat_stats.own_time += _stats.own_time
flat_stats.deep_time += _stats.deep_time
children = list(itervalues(flat_children))
return cls(stats.name, stats.filename, stats.lineno, stats.module,
stats.own_hits, stats.deep_hits, stats.own_time,
stats.deep_time, children)
|
Makes a flat statistics from the given statistics.
|
def layers(self):
layer = ['NONE'] * len(self.entities)
for i, e in enumerate(self.entities):
if hasattr(e, 'layer'):
layer[i] = str(e.layer)
return layer
|
If entities have a layer defined, return it.
Returns
---------
layers: (len(entities), ) list of str
|
async def delView(self, iden):
if iden == self.iden:
raise s_exc.SynErr(mesg='cannot delete the main view')
view = self.views.pop(iden, None)
if view is None:
raise s_exc.NoSuchView(iden=iden)
await self.hive.pop(('cortex', 'views', iden))
await view.fini()
|
Delete a cortex view by iden.
|
def _get_relationships(model):
relationships = []
for name, relationship in inspect(model).relationships.items():
class_ = relationship.mapper.class_
if relationship.uselist:
rel = ListRelationship(name, relation=class_.__name__)
else:
rel = Relationship(name, relation=class_.__name__)
relationships.append(rel)
return tuple(relationships)
|
Gets the necessary relationships for the resource
by inspecting the sqlalchemy model for relationships.
:param DeclarativeMeta model: The SQLAlchemy ORM model.
:return: A tuple of Relationship/ListRelationship instances
corresponding to the relationships on the Model.
:rtype: tuple
|
def _point_in_bbox(point, bounds):
    return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
               or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
|
Validate whether the point is inside the bounding box.
|
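A small sketch of the check above. Judging from the index comparisons, ``bounds`` is interpreted as ``[min_lat, min_lon, max_lat, max_lon]`` with GeoJSON ``[lon, lat]`` coordinates; this ordering is inferred from the code, not documented.

```python
bounds = [52.0, 13.0, 53.0, 14.0]                         # [S, W, N, E]

berlin = {'type': 'Point', 'coordinates': [13.4, 52.5]}   # [lon, lat]
paris = {'type': 'Point', 'coordinates': [2.35, 48.86]}

print(_point_in_bbox(berlin, bounds))  # True
print(_point_in_bbox(paris, bounds))   # False
```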
def midpoint(self):
midpoints = []
for segment in self:
if len(segment) < 2:
midpoints.append([])
else:
midpoints.append(segment.midpoint())
return midpoints
|
Calculate the midpoint between locations in segments.
Returns:
list of Point: Groups of midpoint between points in segments
|
def post_command(self, command, args):
self._loop.log_coroutine(self.send_command(command, args, Verifier()))
|
Post a command asynchronously and don't wait for a response.
There is no notification of any error that could happen during
command execution. A log message will be generated if an error
occurred. The command's response is discarded.
This method is thread-safe and may be called from inside or outside
of the background event loop. If there is no websockets connection,
no error will be raised (though an error will be logged).
Args:
command (string): The command name
args (dict): Optional arguments
|
def DiffArrayObjects(self, oldObj, newObj, isElementLinks=False):
if oldObj == newObj:
return True
if not oldObj or not newObj:
return False
if len(oldObj) != len(newObj):
__Log__.debug('DiffArrayObjects: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
firstObj = oldObj[0]
if IsPrimitiveType(firstObj):
return self.DiffPrimitiveArrays(oldObj, newObj)
elif isinstance(firstObj, types.ManagedObject):
return self.DiffAnyArrays(oldObj, newObj, isElementLinks)
elif isinstance(firstObj, types.DataObject):
return self.DiffDoArrays(oldObj, newObj, isElementLinks)
else:
raise TypeError("Unknown type: %s" % oldObj.__class__)
|
Method which delegates the diffing of arrays based on the element type
|
def look_up_and_get(cellpy_file_name, table_name):
    root = '/CellpyData'
    table_path = '/'.join([root, table_name])
    logging.debug(f"look_up_and_get({cellpy_file_name}, {table_name})")
    store = pd.HDFStore(cellpy_file_name)
    table = store.select(table_path)
    store.close()
    return table
|
Extracts table from cellpy hdf5-file.
|
def encode_space_pad(instr, length, encoding):
    output = instr.decode('utf-8').encode(encoding)
    if len(output) > length:
        raise pycdlibexception.PyCdlibInvalidInput('Input string too long!')
    encoded_space = ' '.encode(encoding)
    left = length - len(output)
    while left > 0:
        output += encoded_space
        left -= len(encoded_space)
    if left < 0:
        output = output[:left]
    return output
|
A function to pad out an input string with spaces to the length specified.
The space is first encoded into the specified encoding, then appended to
the input string until the length is reached.
Parameters:
instr - The input string to encode and pad.
length - The length to pad the input string to.
encoding - The encoding to use.
Returns:
The input string encoded in the encoding and padded with encoded spaces.
|
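A short sketch of the padding behaviour, assuming ``encode_space_pad`` is importable from pycdlib's utilities; the byte values are illustrative.

```python
padded = encode_space_pad(b'PYCDLIB', 16, 'utf-16-be')

print(len(padded))   # 16: 14 bytes of UTF-16-BE text plus one encoded space
print(padded)

# An input that encodes to more than `length` bytes raises PyCdlibInvalidInput.
```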
def update(self, newcfg):
for key in newcfg.keys():
if key not in self._cfg:
self._cfg[key] = CaseInsensitiveDict()
for skey in newcfg[key]:
self._cfg[key][skey] = newcfg[key][skey]
|
Update current config with a dictionary
|
def write_image(self, img, extname=None, extver=None,
compress=None, tile_dims=None, header=None):
self.create_image_hdu(img,
header=header,
extname=extname, extver=extver,
compress=compress, tile_dims=tile_dims)
if header is not None:
self[-1].write_keys(header)
self[-1]._update_info()
|
Create a new image extension and write the data.
parameters
----------
img: ndarray
An n-dimensional image.
extname: string, optional
An optional extension name.
extver: integer, optional
FITS allows multiple extensions to have the same name (extname).
These extensions can optionally specify an EXTVER version number in
the header. Send extver= to set a particular version, which will
be represented in the header with keyname EXTVER. The extver must
be an integer > 0. If extver is not sent, the first one will be
selected. If ext is an integer, the extver is ignored.
compress: string, optional
A string representing the compression algorithm for images,
default None.
Can be one of
'RICE'
'GZIP'
'GZIP_2'
'PLIO' (no unsigned or negative integers)
'HCOMPRESS'
(case-insensitive) See the cfitsio manual for details.
header: FITSHDR, list, dict, optional
A set of header keys to write. Can be one of these:
- FITSHDR object
- list of dictionaries containing 'name','value' and optionally
a 'comment' field; the order is preserved.
- a dictionary of keyword-value pairs; no comments are written
in this case, and the order is arbitrary.
Note that required keywords such as NAXIS, XTENSION, etc. are cleaned out.
restrictions
------------
The File must be opened READWRITE
|
def cutout_shape(self, shape_obj):
view, mask = self.get_shape_view(shape_obj)
data = self._slice(view)
mdata = np.ma.array(data, mask=np.logical_not(mask))
return mdata
|
Cut out and return a portion of the data corresponding to `shape_obj`.
A masked numpy array is returned, where the pixels not enclosed in
the shape are masked out.
|
def setup(__pkg: ModuleType) -> Tuple[Callable[[str], str],
Callable[[str, str, int], str]]:
package_locale = path.join(path.dirname(__pkg.__file__), 'locale')
gettext.install(__pkg.__name__, package_locale)
return gettext.gettext, gettext.ngettext
|
Configure ``gettext`` for given package.
Args:
__pkg: Package to use as location for :program:`gettext` files
Returns:
:program:`gettext` functions for singular and plural translations
|
def copy(self, overrides=None, locked=False):
other = copy.copy(self)
if overrides is not None:
other.overrides = overrides
other.locked = locked
other._uncache()
return other
|
Create a separate copy of this config.
|
def _propagate_incompatibility(
self, incompatibility
):
unsatisfied = None
for term in incompatibility.terms:
relation = self._solution.relation(term)
if relation == SetRelation.DISJOINT:
return
elif relation == SetRelation.OVERLAPPING:
if unsatisfied is not None:
return
unsatisfied = term
if unsatisfied is None:
return _conflict
self._log(
"derived: {}{}".format(
"not " if unsatisfied.is_positive() else "", unsatisfied.dependency
)
)
self._solution.derive(
unsatisfied.dependency, not unsatisfied.is_positive(), incompatibility
)
return unsatisfied.dependency.name
|
If incompatibility is almost satisfied by _solution, adds the
negation of the unsatisfied term to _solution.
If incompatibility is satisfied by _solution, returns _conflict. If
incompatibility is almost satisfied by _solution, returns the
unsatisfied term's package name.
Otherwise, returns None.
|
def prep_parallel(self, binary_args, other_args):
if self.length < 100:
raise Exception("Run this across 1 processor by setting num_processors kwarg to None.")
if self.num_processors == -1:
self.num_processors = mp.cpu_count()
split_val = int(np.ceil(self.length/self.num_splits))
split_inds = [self.num_splits*i for i in np.arange(1, split_val)]
inds_split_all = np.split(np.arange(self.length), split_inds)
self.args = []
for i, ind_split in enumerate(inds_split_all):
trans_args = []
for arg in binary_args:
try:
trans_args.append(arg[ind_split])
except TypeError:
trans_args.append(arg)
self.args.append((i, tuple(trans_args)) + other_args)
return
|
Prepare the parallel calculations
Prepares the arguments to be run in parallel.
It will divide up arrays according to num_splits.
Args:
binary_args (list): List of binary arguments for input into the SNR function.
other_args (tuple of obj): tuple of other args for input into parallel snr function.
|
def chunks(arr, size):
    for i in _range(0, len(arr), size):
        yield arr[i:i+size]
|
Splits a list into chunks
:param arr: list to split
:type arr: :class:`list`
:param size: number of elements in each chunk
:type size: :class:`int`
:return: generator object
:rtype: :class:`generator`
|
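A trivial usage sketch of the generator above.

```python
print(list(chunks([1, 2, 3, 4, 5, 6, 7], 3)))
# [[1, 2, 3], [4, 5, 6], [7]]

for batch in chunks(list(range(10)), 4):
    print(batch)
```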
def log_critical(msg, logger="TaskLogger"):
tasklogger = get_tasklogger(logger)
tasklogger.critical(msg)
return tasklogger
|
Log a CRITICAL message
Convenience function to log a message to the default Logger
Parameters
----------
msg : str
Message to be logged
logger : `str`, optional (default: "TaskLogger")
Name used to retrieve the unique TaskLogger
Returns
-------
logger : TaskLogger
|
def bind(end_point, socket_type):
    sock = context.socket(socket_type)
    try:
        sock.bind(end_point)
    except zmq.error.ZMQError as exc:
        sock.close()
        raise exc.__class__('%s: %s' % (exc, end_point))
    return sock
|
Bind to a zmq URL; raise a proper error if the URL is invalid; return
a zmq socket.
|