| code (string, lengths 59 to 4.4k) | docstring (string, lengths 5 to 7.69k) |
|---|---|
def bubble_at_P(P, zs, vapor_pressure_eqns, fugacities=None, gammas=None):
def bubble_P_error(T):
Psats = [VP(T) for VP in vapor_pressure_eqns]
Pcalc = bubble_at_T(zs, Psats, fugacities, gammas)
return P - Pcalc
T_bubble = newton(bubble_P_error, 300)
return T_bubble
|
Calculates the bubble-point temperature for a given pressure
Parameters
----------
P : float
Pressure, [Pa]
zs : list[float]
Overall mole fractions of all species, [-]
vapor_pressure_eqns : list[function]
Temperature-dependent vapor pressure function for each species; returns Psat, [Pa]
fugacities : list[float], optional
fugacities of each species, defaults to list of ones, [-]
gammas : list[float], optional
gammas of each species, defaults to list of ones, [-]
Returns
-------
Tbubble : float
Temperature of bubble point at pressure `P`, [K]
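A minimal usage sketch: `bubble_at_T` here is a Raoult's-law stand-in (the real helper is not shown above) and the vapor-pressure coefficients are made up for illustration; `bubble_at_P` also needs a `newton` root-finder (e.g. `scipy.optimize.newton`) in scope.
```python
from math import exp

def bubble_at_T(zs, Psats, fugacities=None, gammas=None):
    # Raoult's-law stand-in: P_bubble = sum(z_i * gamma_i * Psat_i / phi_i)
    fugacities = fugacities or [1.0] * len(zs)
    gammas = gammas or [1.0] * len(zs)
    return sum(z * g * P / f for z, g, P, f in zip(zs, gammas, Psats, fugacities))

# Illustrative (not real) Antoine-style vapor pressure curves, in Pa
vapor_pressure_eqns = [lambda T: exp(21.0 - 3500.0 / T),
                       lambda T: exp(20.5 - 3200.0 / T)]

# With bubble_at_P and scipy.optimize.newton in scope:
# T_bub = bubble_at_P(101325.0, [0.4, 0.6], vapor_pressure_eqns)
```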
|
def rst2md(text):
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'# \1', text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'$${0}$$'.format(match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+)`')
text = re.sub(inline_math, r'$\1$', text)
return text
|
Converts the RST text from the examples docstrings and comments
into markdown text for the IPython notebooks
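A small round trip, assuming `import re` and the `rst2md` above are in scope; the expected markdown output is shown in comments.
```python
import re  # rst2md relies on the re module

rst = ("Compute the variance :math:`\\sigma^2` of a series.\n"
       ".. math:: \\sigma^2 = E[(X - \\mu)^2]")
print(rst2md(rst))
# Compute the variance $\sigma^2$ of a series.
# $$\sigma^2 = E[(X - \mu)^2]$$
```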
|
def ftp_folder_match(ftp,localFolder,deleteStuff=True):
for fname in glob.glob(localFolder+"/*.*"):
ftp_upload(ftp,fname)
return
|
Upload everything from `localFolder` into the current FTP folder.
|
def reboot_server(self, datacenter_id, server_id):
response = self._perform_request(
url='/datacenters/%s/servers/%s/reboot' % (
datacenter_id,
server_id),
method='POST-ACTION')
return response
|
Reboots the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
|
def hsv_to_rgb(hsv):
h, s, v = hsv
c = v * s
h /= 60
x = c * (1 - abs((h % 2) - 1))
m = v - c
if h < 1:
res = (c, x, 0)
elif h < 2:
res = (x, c, 0)
elif h < 3:
res = (0, c, x)
elif h < 4:
res = (0, x, c)
elif h < 5:
res = (x, 0, c)
elif h < 6:
res = (c, 0, x)
else:
raise ColorException("Unable to convert from HSV to RGB")
r, g, b = res
return round((r + m)*255, 3), round((g + m)*255, 3), round((b + m)*255, 3)
|
Convert an HSV color representation to an RGB color representation.
(h, s, v) :: h -> [0, 360), s -> [0, 1], v -> [0, 1]
:param hsv: A tuple of three numeric values corresponding to the hue, saturation, and value.
:return: RGB representation of the input HSV value.
:rtype: tuple
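A few illustrative calls to the function above, with the expected rounded results as comments.
```python
print(hsv_to_rgb((0, 1, 1)))        # (255.0, 0.0, 0.0)     pure red
print(hsv_to_rgb((120, 1, 1)))      # (0.0, 255.0, 0.0)     pure green
print(hsv_to_rgb((240, 0.5, 1)))    # (127.5, 127.5, 255.0) pale blue
```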
|
def mget(self, keys, *args):
args = list_or_args(keys, args)
server_keys = {}
ret_dict = {}
for key in args:
server_name = self.get_server_name(key)
server_keys[server_name] = server_keys.get(server_name, [])
server_keys[server_name].append(key)
for server_name, sub_keys in iteritems(server_keys):
values = self.connections[server_name].mget(sub_keys)
ret_dict.update(dict(zip(sub_keys, values)))
result = []
for key in args:
result.append(ret_dict.get(key, None))
return result
|
Returns a list of values ordered identically to ``keys``
|
def add_variability_to_fakelc_collection(simbasedir,
override_paramdists=None,
overwrite_existingvar=False):
infof = os.path.join(simbasedir,'fakelcs-info.pkl')
with open(infof, 'rb') as infd:
lcinfo = pickle.load(infd)
lclist = lcinfo['lcfpath']
varflag = lcinfo['isvariable']
vartypes = lcinfo['vartype']
vartind = 0
varinfo = {}
for lc, varf, _lcind in zip(lclist, varflag, range(len(lclist))):
if varf:
thisvartype = vartypes[vartind]
if (override_paramdists and
isinstance(override_paramdists, dict) and
thisvartype in override_paramdists and
isinstance(override_paramdists[thisvartype], dict)):
thisoverride_paramdists = override_paramdists[thisvartype]
else:
thisoverride_paramdists = None
varlc = add_fakelc_variability(
lc, thisvartype,
override_paramdists=thisoverride_paramdists,
overwrite=overwrite_existingvar
)
varinfo[varlc['objectid']] = {'params': varlc['actual_varparams'],
'vartype': varlc['actual_vartype']}
vartind = vartind + 1
else:
varlc = add_fakelc_variability(
lc, None,
overwrite=overwrite_existingvar
)
varinfo[varlc['objectid']] = {'params': varlc['actual_varparams'],
'vartype': varlc['actual_vartype']}
lcinfo['varinfo'] = varinfo
tempoutf = '%s.%s' % (infof, md5(npr.bytes(4)).hexdigest()[-8:])
with open(tempoutf, 'wb') as outfd:
pickle.dump(lcinfo, outfd, pickle.HIGHEST_PROTOCOL)
if os.path.exists(tempoutf):
shutil.copy(tempoutf, infof)
os.remove(tempoutf)
else:
LOGEXCEPTION('could not write output light curve file to dir: %s' %
os.path.dirname(tempoutf))
raise
return lcinfo
|
This adds variability and noise to all fake LCs in `simbasedir`.
If an object is marked as variable in the `fakelcs-info`.pkl file in
`simbasedir`, a variable signal will be added to its light curve based on
its selected type, default period and amplitude distribution, the
appropriate params, etc. The epochs for each variable object will be chosen
uniformly from its time-range (and may not necessarily fall on an actual
observed time). Nonvariable objects will only have noise added as determined
by their params, but no variable signal will be added.
Parameters
----------
simbasedir : str
The directory containing the fake LCs to process.
override_paramdists : dict
This can be used to override the stored variable parameters in each fake
LC. It should be a dict of the following form::
{'<vartype1>': {'<param1>': a scipy.stats distribution function or
the np.random.randint function,
.
.
.
'<paramN>': a scipy.stats distribution function
or the np.random.randint function}}
for any vartype in VARTYPE_LCGEN_MAP. These are used to override the
default parameter distributions for each variable type.
overwrite_existingvar : bool
If this is True, then will overwrite any existing variability in the
input fake LCs in `simbasedir`.
Returns
-------
dict
This returns a dict containing the fake LC filenames as keys and
variability info for each as values.
|
def del_attr(self, name):
if name in self.namespace:
if name in self.cells:
self.del_cells(name)
elif name in self.spaces:
self.del_space(name)
elif name in self.refs:
self.del_ref(name)
else:
raise RuntimeError("Must not happen")
else:
raise KeyError("'%s' not found in Space '%s'" % (name, self.name))
|
Implementation of attribute deletion
``del space.name`` by a user script.
Called from ``StaticSpace.__delattr__``.
|
def sanitize_filename(filename):
token = generate_drop_id()
name, extension = splitext(filename)
if extension:
return '%s%s' % (token, extension)
else:
return token
|
Preserve the file extension, but replace the name with a random token.
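A self-contained sketch: `generate_drop_id` is assumed to return a random string token, so a uuid-based stand-in is used here.
```python
from os.path import splitext
from uuid import uuid4

def generate_drop_id():
    # stand-in for the project's real token generator
    return uuid4().hex

print(sanitize_filename('report 2021.pdf'))  # e.g. '6f1a...c9.pdf'
print(sanitize_filename('README'))           # bare token, no extension
```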
|
def _getEphemeralMembers(self):
e = BacktrackingTM._getEphemeralMembers(self)
if self.makeCells4Ephemeral:
e.extend(['cells4'])
return e
|
List of our member variables that we don't need to be saved
|
def match(self, subsetLines, offsetOfSubset, fileName):
for (offset,l) in enumerate(subsetLines):
column = l.find(self.literal)
if column != -1:
truePosition = offset + offsetOfSubset
_logger.debug('Found match on line {}, col {}'.format(str(truePosition+ 1), column))
_logger.debug('Line is {}'.format(l))
self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +1)
return truePosition
self.failed = True
raise DirectiveException(self)
|
Search through lines for a match.
Raises a DirectiveException if the match fails.
If the match is successful, returns the position at which the match was found.
|
def detect_actual_closed_dates(self, issues, kind):
if self.options.verbose:
print("Fetching closed dates for {} {}...".format(
len(issues), kind)
)
all_issues = copy.deepcopy(issues)
for issue in all_issues:
if self.options.verbose > 2:
print(".", end="")
if not issues.index(issue) % 30:
print("")
self.find_closed_date_by_commit(issue)
if not issue.get('actual_date', False):
if issue.get('closed_at', False):
print("Skipping closed non-merged issue:
issue["number"], issue["title"]))
all_issues.remove(issue)
if self.options.verbose > 2:
print(".")
return all_issues
|
Find correct closed dates, if issues was closed by commits.
:param list issues: issues to check
:param str kind: either "issues" or "pull requests"
:rtype: list
:return: issues with updated closed dates
|
def niftilist_mask_to_array(img_filelist, mask_file=None, outdtype=None):
img = check_img(img_filelist[0])
if not outdtype:
outdtype = img.dtype
mask_data, _ = load_mask_data(mask_file)
indices = np.where (mask_data)
mask = check_img(mask_file)
outmat = np.zeros((len(img_filelist), np.count_nonzero(mask_data)),
dtype=outdtype)
for i, img_item in enumerate(img_filelist):
img = check_img(img_item)
if not are_compatible_imgs(img, mask):
raise NiftiFilesNotCompatible(repr_imgs(img), repr_imgs(mask_file))
vol = get_img_data(img)
outmat[i, :] = vol[indices]
return outmat, mask_data
|
From the list of absolute paths to nifti files, creates a Numpy array
with the masked data.
Parameters
----------
img_filelist: list of str
List of absolute file paths to nifti files. All nifti files must have
the same shape.
mask_file: str
Path to a Nifti mask file.
Should be the same shape as the files in nii_filelist.
outdtype: dtype
Type of the elements of the array, if not set will obtain the dtype from
the first nifti file.
Returns
-------
outmat: np.ndarray
Numpy array of shape (N, n_mask_voxels) containing the masked data of the
N files as flat vectors.
mask_data: np.ndarray
The boolean mask volume loaded from `mask_file`, useful for remapping the
flat vectors back into volumes.
|
def _post_connect(self):
if not self.initiator:
if "plain" in self.auth_methods or "digest" in self.auth_methods:
self.set_iq_get_handler("query","jabber:iq:auth",
self.auth_in_stage1)
self.set_iq_set_handler("query","jabber:iq:auth",
self.auth_in_stage2)
elif self.registration_callback:
iq = Iq(stanza_type = "get")
iq.set_content(Register())
self.set_response_handlers(iq, self.registration_form_received, self.registration_error)
self.send(iq)
return
ClientStream._post_connect(self)
|
Set up legacy (`jabber:iq:auth`) authentication handlers, or request the
in-band registration form, once the connection is established.
|
def check_sla(self, sla, diff_metric):
try:
if sla.display == '%':
diff_val = float(diff_metric['percent_diff'])
else:
diff_val = float(diff_metric['absolute_diff'])
except ValueError:
return False
if not (sla.check_sla_passed(diff_val)):
self.sla_failures += 1
self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric))
return True
|
Check whether the SLA has passed or failed
|
def set(verbose,
host,
http_port,
ws_port,
use_https,
verify_ssl):
_config = GlobalConfigManager.get_config_or_default()
if verbose is not None:
_config.verbose = verbose
if host is not None:
_config.host = host
if http_port is not None:
_config.http_port = http_port
if ws_port is not None:
_config.ws_port = ws_port
if use_https is not None:
_config.use_https = use_https
if verify_ssl is False:
_config.verify_ssl = verify_ssl
GlobalConfigManager.set_config(_config)
Printer.print_success('Config was updated.')
CliConfigManager.purge()
|
Set the global config values.
Example:
\b
```bash
$ polyaxon config set --host=localhost --http_port=80
```
|
def var(nums, mean_func=amean, ddof=0):
x_bar = mean_func(nums)
return sum((x - x_bar) ** 2 for x in nums) / (len(nums) - ddof)
|
r"""Calculate the variance.
The variance (:math:`\sigma^2`) of a series of numbers (:math:`x_i`) with
mean :math:`\mu` and population :math:`N` is:
:math:`\sigma^2 = \frac{1}{N}\sum_{i=1}^{N}(x_i-\mu)^2`.
Cf. https://en.wikipedia.org/wiki/Variance
Parameters
----------
nums : list
A series of numbers
mean_func : function
A mean function (amean by default)
ddof : int
The degrees of freedom (0 by default)
Returns
-------
float
The variance of the values in the series
Examples
--------
>>> var([1, 1, 1, 1])
0.0
>>> var([1, 2, 3, 4])
1.25
>>> round(var([1, 2, 3, 4], ddof=1), 12)
1.666666666667
|
def adjacency2graph(adjacency, edge_type=None, adjust=1, **kwargs):
if isinstance(adjacency, np.ndarray):
adjacency = _matrix2dict(adjacency)
elif isinstance(adjacency, dict):
adjacency = _dict2dict(adjacency)
else:
msg = ("If the adjacency parameter is supplied it must be a "
"dict, or a numpy.ndarray.")
raise TypeError(msg)
if edge_type is None:
edge_type = {}
else:
if isinstance(edge_type, np.ndarray):
edge_type = _matrix2dict(edge_type, etype=True)
elif isinstance(edge_type, dict):
edge_type = _dict2dict(edge_type)
for u, ty in edge_type.items():
for v, et in ty.items():
adjacency[u][v]['edge_type'] = et
g = nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())
adjacency = nx.to_dict_of_dicts(g)
adjacency = _adjacency_adjust(adjacency, adjust, True)
return nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())
|
Takes an adjacency list, dict, or matrix and returns a graph.
The purpose of this function is take an adjacency list (or matrix)
and return a :class:`.QueueNetworkDiGraph` that can be used with a
:class:`.QueueNetwork` instance. The Graph returned has the
``edge_type`` edge property set for each edge. Note that the graph may
be altered.
Parameters
----------
adjacency : dict or :class:`~numpy.ndarray`
An adjacency list as either a dict, or an adjacency matrix.
adjust : int ``{1, 2}`` (optional, default: 1)
Specifies what to do when the graph has terminal vertices
(nodes with no out-edges). Note that if ``adjust`` is not 2
then it is assumed to be 1. There are two choices:
* ``adjust = 1``: A loop is added to each terminal node in the
graph, and the ``edge_type`` of that loop is set to 0.
* ``adjust = 2``: All edges leading to terminal nodes have
their ``edge_type`` set to 0.
**kwargs :
Unused.
Returns
-------
out : :any:`networkx.DiGraph`
A directed graph with the ``edge_type`` edge property.
Raises
------
TypeError
Is raised if ``adjacency`` is not a dict or
:class:`~numpy.ndarray`.
Examples
--------
If terminal nodes are such that all in-edges have edge type ``0``
then nothing is changed. However, if a node is a terminal node then
a loop is added with edge type 0.
>>> import queueing_tool as qt
>>> adj = {
... 0: {1: {}},
... 1: {2: {},
... 3: {}},
... 3: {0: {}}}
>>> eTy = {0: {1: 1}, 1: {2: 2, 3: 4}, 3: {0: 1}}
>>> # A loop will be added to vertex 2
>>> g = qt.adjacency2graph(adj, edge_type=eTy)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
(1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),
(2, {2: {'edge_type': 0}}),
(3, {0: {'edge_type': 1}})]
You can use a dict of lists to represent the adjacency list.
>>> adj = {0 : [1], 1: [2, 3], 3: [0]}
>>> g = qt.adjacency2graph(adj, edge_type=eTy)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
(1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),
(2, {2: {'edge_type': 0}}),
(3, {0: {'edge_type': 1}})]
Alternatively, you could have this function adjust the edges that
lead to terminal vertices by changing their edge type to 0:
>>> # The graph is unaltered
>>> g = qt.adjacency2graph(adj, edge_type=eTy, adjust=2)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
(1, {2: {'edge_type': 0}, 3: {'edge_type': 4}}),
(2, {}),
(3, {0: {'edge_type': 1}})]
|
def _parse_genotype(self, vcf_fields):
format_col = vcf_fields[8].split(':')
genome_data = vcf_fields[9].split(':')
try:
gt_idx = format_col.index('GT')
except ValueError:
return []
return [int(x) for x in re.split(r'[\|/]', genome_data[gt_idx]) if
x != '.']
|
Parse genotype from VCF line data
|
def create_projection(self, fov: float = 75.0, near: float = 1.0, far: float = 100.0, aspect_ratio: float = None):
return matrix44.create_perspective_projection_matrix(
fov,
aspect_ratio or self.window.aspect_ratio,
near,
far,
dtype='f4',
)
|
Create a projection matrix with the following parameters.
When ``aspect_ratio`` is not provided the configured aspect
ratio for the window will be used.
Args:
fov (float): Field of view (float)
near (float): Camera near value
far (float): Camera far value
Keyword Args:
aspect_ratio (float): Aspect ratio of the viewport
Returns:
The projection matrix as a float32 :py:class:`numpy.array`
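An equivalent standalone call using pyrr directly, with illustrative values in place of the window's configured aspect ratio.
```python
from pyrr import matrix44

proj = matrix44.create_perspective_projection_matrix(
    75.0,      # fov in degrees
    16 / 9,    # aspect ratio (stand-in for self.window.aspect_ratio)
    1.0,       # near
    100.0,     # far
    dtype='f4',
)
print(proj.shape)  # (4, 4)
```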
|
def send(self, message, json=False, callback=None):
pkt = dict(type="message", data=message, endpoint=self.ns_name)
if json:
pkt['type'] = "json"
if callback:
pkt['ack'] = True
pkt['id'] = msgid = self.socket._get_next_msgid()
self.socket._save_ack_callback(msgid, callback)
self.socket.send_packet(pkt)
|
Use send to send a simple string message.
If ``json`` is True, the message will be encoded as a JSON object
on the wire, and decoded on the other side.
This is mostly for backwards compatibility. ``emit()`` is more fun.
:param callback: This is a callback function that will be
called automatically by the client upon
reception. It does not verify that the
listener over there was completed with
success. It just tells you that the browser
got a hold of the packet.
:type callback: callable
|
def hubspot(parser, token):
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return HubSpotNode()
|
HubSpot tracking template tag.
Renders Javascript code to track page visits. You must supply
your portal ID (as a string) in the ``HUBSPOT_PORTAL_ID`` setting.
|
def zoom_region(self):
where = np.array(np.where(np.invert(self.astype('bool'))))
y0, x0 = np.amin(where, axis=1)
y1, x1 = np.amax(where, axis=1)
return [y0, y1+1, x0, x1+1]
|
The zoomed rectangular region corresponding to the square encompassing all unmasked values.
This is used to zoom in on the region of an image that is used in an analysis for visualization.
|
def record_schema(self):
schema_path = current_jsonschemas.url_to_path(self['$schema'])
schema_prefix = current_app.config['DEPOSIT_JSONSCHEMAS_PREFIX']
if schema_path and schema_path.startswith(schema_prefix):
return current_jsonschemas.path_to_url(
schema_path[len(schema_prefix):]
)
|
Convert deposit schema to a valid record schema.
|
def _post(self, url, data, scope):
self._create_session(scope)
response = self.session.post(url, data=data)
return response.status_code, response.text
|
Make a POST request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a POST request to.
data (str): The json encoded payload to POST.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE`
|
def apply_mask(self, mask_img):
self.set_mask(mask_img)
return self.get_data(masked=True, smoothed=True, safe_copy=True)
|
First calls ``set_mask`` and then returns the masked data via ``get_data(masked=True, smoothed=True, safe_copy=True)``.
Parameters
----------
mask_img: nifti-like image, NeuroImage or str
3D mask array: True where a voxel should be used.
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Returns
-------
The masked data deepcopied
|
def scaper_to_tag(annotation):
annotation.namespace = 'tag_open'
data = annotation.pop_data()
for obs in data:
annotation.append(time=obs.time, duration=obs.duration,
confidence=obs.confidence, value=obs.value['label'])
return annotation
|
Convert scaper annotations to tag_open
|
def padded_blurred_image_2d_from_padded_image_1d_and_psf(self, padded_image_1d, psf):
padded_model_image_1d = self.convolve_array_1d_with_psf(padded_array_1d=padded_image_1d, psf=psf)
return self.scaled_array_2d_from_array_1d(array_1d=padded_model_image_1d)
|
Compute a 2D padded blurred image from a 1D padded image.
Parameters
----------
padded_image_1d : ndarray
A 1D unmasked image which is blurred with the PSF.
psf : ndarray
An array describing the PSF kernel of the image.
|
def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0):
self.clear_color = (red, green, blue, alpha)
self.clear_depth = depth
|
Sets the clear values for the window buffer.
Args:
red (float): red component
green (float): green component
blue (float): blue component
alpha (float): alpha component
depth (float): depth value
|
def build(ctx, project, build):
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['build'] = build
|
Commands for build jobs.
|
def hierarchy(annotation, **kwargs):
htimes, hlabels = hierarchy_flatten(annotation)
htimes = [np.asarray(_) for _ in htimes]
return mir_eval.display.hierarchy(htimes, hlabels, **kwargs)
|
Plotting wrapper for hierarchical segmentations
|
def upload_stream(self, destination, *, offset=0):
return self.get_stream(
"STOR " + str(destination),
"1xx",
offset=offset,
)
|
Create a stream for writing data to the `destination` file.
:param destination: destination path of file on server side
:type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
:param offset: byte offset for stream start position
:type offset: :py:class:`int`
:rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`
|
async def set_tz(self):
settings = await self.api.account.settings.get()
tz = settings.time_zone.tzinfo_name
os.environ['TZ'] = tz
time.tzset()
|
Set the environment timezone to the timezone
set in your Twitter settings.
|
def execute_async_script(self, script, *args):
return self._execute(Command.EXECUTE_ASYNC_SCRIPT, {
'script': script,
'args': list(args)})
|
Execute JavaScript asynchronously in the current context.
Support:
Web(WebView)
Args:
script: The JavaScript to execute.
*args: Arguments for your JavaScript.
Returns:
Returns the return value of the function.
|
def fetch_by_name(self, name):
service = self.collection.find_one({'name': name})
if not service:
raise ServiceNotFound
return Service(service)
|
Gets service for given ``name`` from mongodb storage.
|
def MOVBE(cpu, dest, src):
size = dest.size
arg0 = src.read()
temp = 0
for pos in range(0, size, 8):
temp = (temp << 8) | (arg0 & 0xff)
arg0 = arg0 >> 8
dest.write(temp)
|
Moves data after swapping bytes.
Performs a byte swap operation on the data copied from the second operand (source operand) and store the result
in the first operand (destination operand). The source operand can be a general-purpose register, or memory location; the destination register can be a general-purpose register, or a memory location; however, both operands can
not be registers, and only one operand can be a memory location. Both operands must be the same size, which can
be a word, a doubleword or quadword.
The MOVBE instruction is provided for swapping the bytes on a read from memory or on a write to memory; thus
providing support for converting little-endian values to big-endian format and vice versa.
In 64-bit mode, the instruction's default operation size is 32 bits. Use of the REX.R prefix permits access to additional registers (R8-R15). Use of the REX.W prefix promotes operation to 64 bits::
TEMP = SRC
IF ( OperandSize = 16)
THEN
DEST[7:0] = TEMP[15:8];
DEST[15:8] = TEMP[7:0];
ELSE IF ( OperandSize = 32)
DEST[7:0] = TEMP[31:24];
DEST[15:8] = TEMP[23:16];
DEST[23:16] = TEMP[15:8];
DEST[31:24] = TEMP[7:0];
ELSE IF ( OperandSize = 64)
DEST[7:0] = TEMP[63:56];
DEST[15:8] = TEMP[55:48];
DEST[23:16] = TEMP[47:40];
DEST[31:24] = TEMP[39:32];
DEST[39:32] = TEMP[31:24];
DEST[47:40] = TEMP[23:16];
DEST[55:48] = TEMP[15:8];
DEST[63:56] = TEMP[7:0];
FI;
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
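A standalone illustration of the byte swap described by the pseudocode above (16-bit case, value chosen for illustration).
```python
value, size = 0x1234, 16
swapped = 0
for _ in range(size // 8):
    swapped = (swapped << 8) | (value & 0xFF)  # peel off the low byte, push it left
    value >>= 8
assert swapped == 0x3412
```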
|
def _get_job_metadata(provider, user_id, job_name, script, task_ids,
user_project, unique_job_id):
create_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())
user_id = user_id or dsub_util.get_os_user()
job_metadata = provider.prepare_job_metadata(script.name, job_name, user_id,
create_time)
if unique_job_id:
job_metadata['job-id'] = uuid.uuid4().hex
job_metadata['create-time'] = create_time
job_metadata['script'] = script
job_metadata['user-project'] = user_project
if task_ids:
job_metadata['task-ids'] = dsub_util.compact_interval_string(list(task_ids))
return job_metadata
|
Allow provider to extract job-specific metadata from command-line args.
Args:
provider: job service provider
user_id: user submitting the job
job_name: name for the job
script: the script to run
task_ids: a set of the task-ids for all tasks in the job
user_project: name of the project to be billed for the request
unique_job_id: generate a unique job id
Returns:
A dictionary of job-specific metadata (such as job id, name, etc.)
|
def contains(self, *items):
if len(items) == 0:
raise ValueError('one or more args must be given')
elif len(items) == 1:
if items[0] not in self.val:
if self._check_dict_like(self.val, return_as_bool=True):
self._err('Expected <%s> to contain key <%s>, but did not.' % (self.val, items[0]))
else:
self._err('Expected <%s> to contain item <%s>, but did not.' % (self.val, items[0]))
else:
missing = []
for i in items:
if i not in self.val:
missing.append(i)
if missing:
if self._check_dict_like(self.val, return_as_bool=True):
self._err('Expected <%s> to contain keys %s, but did not contain key%s %s.' % (self.val, self._fmt_items(items), '' if len(missing) == 1 else 's', self._fmt_items(missing)))
else:
self._err('Expected <%s> to contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing)))
return self
|
Asserts that val contains the given item or items.
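Typical assertpy-style usage of this method, assuming `assert_that` wraps the value under test.
```python
from assertpy import assert_that

assert_that([1, 2, 3]).contains(1)                 # single item
assert_that([1, 2, 3]).contains(1, 3)              # multiple items
assert_that({'a': 1, 'b': 2}).contains('a', 'b')   # dict-like values are checked by key
```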
|
def get_key(self, *args, **kwargs):
if kwargs.pop('force', None):
headers = kwargs.get('headers', {})
headers['force'] = True
kwargs['headers'] = headers
return super(Bucket, self).get_key(*args, **kwargs)
|
Return the key from MimicDB.
:param boolean force: If true, API call is forced to S3
|
def save_validation_log(self, **kwargs):
self._fill_project_info(kwargs)
kwargs.update({'time': datetime.utcnow()})
_result = self.db.ValidLog.insert_one(kwargs)
_log = self._print_dict(kwargs)
logging.info("[Database] valid log: " + _log)
|
Saves the validation log, timestamp will be added automatically.
Parameters
-----------
kwargs : logging information
Events, such as accuracy, loss, step number and etc.
Examples
---------
>>> db.save_validation_log(accuracy=0.33, loss=0.98)
|
def autocommit(self):
if len(self.cursors.keys()) == 0:
self.connection.autocommit = True
else:
raise AttributeError('database cursors are already active, '
'cannot switch to autocommit now')
|
This sets the database connection to autocommit. Must be called before
any cursors have been instantiated.
|
def _get_node_names(h5file, h5path='/', node_type=h5py.Dataset):
if isinstance(h5file, str):
_h5file = get_h5file(h5file, mode='r')
else:
_h5file = h5file
if not h5path.startswith('/'):
h5path = '/' + h5path
names = []
try:
h5group = _h5file.require_group(h5path)
for node in _hdf5_walk(h5group, node_type=node_type):
names.append(node.name)
except:
raise RuntimeError('Error getting node names from {}/{}.'.format(_h5file.filename, h5path))
finally:
if isinstance(h5file, str):
_h5file.close()
return names
|
Return the names of the nodes of type `node_type` within `h5path` of `h5file`.
Parameters
----------
h5file: h5py.File
HDF5 file object
h5path: str
HDF5 group path to get the group names from
node_type: h5py object type
HDF5 object type
Returns
-------
names: list of str
List of names
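A hedged usage sketch: build a throwaway HDF5 file with h5py and list the datasets under a group. `_get_node_names` depends on helpers (`get_h5file`, `_hdf5_walk`) not shown here, so its call is left commented.
```python
import h5py
import numpy as np

with h5py.File('demo.h5', 'w') as f:
    f.create_dataset('/grp/a', data=np.arange(3))
    f.create_dataset('/grp/b', data=np.zeros((2, 2)))

# names = _get_node_names('demo.h5', '/grp')   # expected: ['/grp/a', '/grp/b']
```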
|
def _update_optional(cobra_object, new_dict, optional_attribute_dict,
ordered_keys):
for key in ordered_keys:
default = optional_attribute_dict[key]
value = getattr(cobra_object, key)
if value is None or value == default:
continue
new_dict[key] = _fix_type(value)
|
update new_dict with optional attributes from cobra_object
|
def ghmean(nums):
m_g = gmean(nums)
m_h = hmean(nums)
if math.isnan(m_g) or math.isnan(m_h):
return float('nan')
while round(m_h, 12) != round(m_g, 12):
m_g, m_h = (m_g * m_h) ** (1 / 2), (2 * m_g * m_h) / (m_g + m_h)
return m_g
|
Return geometric-harmonic mean.
Iterates between geometric & harmonic means until they converge to
a single value (rounded to 12 digits).
Cf. https://en.wikipedia.org/wiki/Geometric-harmonic_mean
Parameters
----------
nums : list
A series of numbers
Returns
-------
float
The geometric-harmonic mean of nums
Examples
--------
>>> ghmean([1, 2, 3, 4])
2.058868154613003
>>> ghmean([1, 2])
1.3728805006183502
>>> ghmean([0, 5, 1000])
0.0
>>> ghmean([0, 0])
0.0
>>> ghmean([0, 0, 5])
nan
|
def post_deploy(self):
for service in self.genv.services:
service = service.strip().upper()
self.vprint('post_deploy:', service)
funcs = common.service_post_deployers.get(service)
if funcs:
self.vprint('Running post-deployments for service %s...' % (service,))
for func in funcs:
try:
func()
except Exception as e:
print('Post deployment error: %s' % e, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
|
Runs methods that services have requested be run after deployment.
|
def draw_cornu_bezier(x0, y0, t0, t1, s0, c0, flip, cs, ss, cmd, scale, rot):
s = None
for j in range(0, 5):
t = j * .2
t2 = t+ .2
curvetime = t0 + t * (t1 - t0)
curvetime2 = t0 + t2 * (t1 - t0)
Dt = (curvetime2 - curvetime) * scale
if not s:
s, c = eval_cornu(curvetime)
s *= flip
s -= s0
c -= c0
dx1 = cos(pow(curvetime, 2) + (flip * rot))
dy1 = flip * sin(pow(curvetime, 2) + (flip *rot))
x = ((c * cs - s * ss) +x0)
y = ((s * cs + c * ss) + y0)
s2,c2 = eval_cornu(curvetime2)
s2 *= flip
s2 -= s0
c2 -= c0
dx2 = cos(pow(curvetime2, 2) + (flip * rot))
dy2 = flip * sin(pow(curvetime2, 2) + (flip * rot))
x3 = ((c2 * cs - s2 * ss)+x0)
y3 = ((s2 * cs + c2 * ss)+y0)
x1 = (x + ((Dt/3.0) * dx1))
y1 = (y + ((Dt/3.0) * dy1))
x2 = (x3 - ((Dt/3.0) * dx2))
y2 = (y3 - ((Dt/3.0) * dy2))
if cmd == 'moveto':
print_pt(x, y, cmd)
cmd = 'curveto'
print_crv(x1, y1, x2, y2, x3, y3)
dx1, dy1 = dx2, dy2
x,y = x3, y3
return cmd
|
Mark Meyer's code draws elegant CURVETO segments.
|
def _kwargs(self):
return dict(color=self.color, velocity=self.velocity, colors=self.colors)
|
Keyword arguments for recreating the Shape from the vertices.
|
def autocorr_magseries(times, mags, errs,
maxlags=1000,
func=_autocorr_func3,
fillgaps=0.0,
filterwindow=11,
forcetimebin=None,
sigclip=3.0,
magsarefluxes=False,
verbose=True):
interpolated = fill_magseries_gaps(times, mags, errs,
fillgaps=fillgaps,
forcetimebin=forcetimebin,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
filterwindow=filterwindow,
verbose=verbose)
if not interpolated:
print('failed to interpolate light curve to minimum cadence!')
return None
itimes, imags = interpolated['itimes'], interpolated['imags']
if maxlags:
lags = nparange(0, maxlags)
else:
lags = nparange(itimes.size)
series_stdev = 1.483*npmedian(npabs(imags))
if func != _autocorr_func3:
autocorr = nparray([func(imags, x, imags.size, 0.0, series_stdev)
for x in lags])
else:
autocorr = _autocorr_func3(imags, lags[0], imags.size,
0.0, series_stdev)
if maxlags is not None:
autocorr = autocorr[:maxlags]
interpolated.update({'minitime':itimes.min(),
'lags':lags,
'acf':autocorr})
return interpolated
|
This calculates the ACF of a light curve.
This will pre-process the light curve to fill in all the gaps and normalize
everything to zero. If `fillgaps = 'noiselevel'`, fills the gaps with the
noise level obtained via the procedure above. If `fillgaps = 'nan'`, fills
the gaps with `np.nan`.
Parameters
----------
times,mags,errs : np.array
The measurement time-series and associated errors.
maxlags : int
The maximum number of lags to calculate.
func : Python function
This is a function to calculate the lags.
fillgaps : 'noiselevel' or float
This sets what to use to fill in gaps in the time series. If this is
'noiselevel', will smooth the light curve using a point window size of
`filterwindow` (this should be an odd integer), subtract the smoothed LC
from the actual LC and estimate the RMS. This RMS will be used to fill
in the gaps. Other useful values here are 0.0 and np.nan.
filterwindow : int
The light curve's smoothing filter window size to use if
`fillgaps='noiselevel'`.
forcetimebin : None or float
This is used to force a particular cadence in the light curve other than
the automatically determined cadence. This effectively rebins the light
curve to this cadence. This should be in the same time units as `times`.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If your input measurements in `mags` are actually fluxes instead of
mags, set this is True.
verbose : bool
If True, will indicate progress and report errors.
Returns
-------
dict
A dict of the following form is returned::
{'itimes': the interpolated time values after gap-filling,
'imags': the interpolated mag/flux values after gap-filling,
'ierrs': the interpolated measurement errors after gap-filling,
'cadence': the cadence of the output mag/flux time-series,
'minitime': the minimum value of the interpolated times array,
'lags': the lags used to calculate the auto-correlation function,
'acf': the value of the ACF at each lag used}
|
def outfile(self, p):
if self.outdir is not None:
return os.path.join(self.outdir, os.path.basename(p))
else:
return p
|
Path for an output file.
If :attr:`outdir` is set then the path is
``outdir/basename(p)`` else just ``p``
|
def replicated_dataset(dataset, weights, n=None):
"Copy dataset, replicating each example in proportion to its weight."
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result
|
Copy dataset, replicating each example in proportion to its weight.
|
def associate_notification_template(self, workflow,
notification_template, status):
return self._assoc('notification_templates_%s' % status,
workflow, notification_template)
|
Associate a notification template with this workflow.
=====API DOCS=====
Associate a notification template with this workflow job template.
:param workflow: The workflow job template to associate to.
:type workflow: str
:param notification_template: The notification template to be associated.
:type notification_template: str
:param status: type of notification this notification template should be associated to.
:type status: str
:returns: Dictionary of only one key "changed", which indicates whether the association succeeded.
:rtype: dict
=====API DOCS=====
|
def _combineargs(self, *args, **kwargs):
d = {arg: True for arg in args}
d.update(kwargs)
return d
|
Add switches as 'options' with value True to the options dict.
|
def last_arg_decorator(func):
@wraps(func)
def decorator(*args, **kwargs):
if signature_matches(func, args, kwargs):
return func(*args, **kwargs)
else:
return lambda last: func(*(args + (last,)), **kwargs)
return decorator
|
Allows a function to be used as either a decorator with args, or called as
a normal function.
@last_arg_decorator
def register_a_thing(foo, func, bar=True):
..
# Called as a decorator
@register_a_thing("abc", bar=False)
def my_func():
...
# Called as a normal function call
def my_other_func():
...
register_a_thing("def", my_other_func, bar=True)
|
def create(self, data):
if 'name' not in data:
raise KeyError('The file must have a name')
if 'file_data' not in data:
raise KeyError('The file must have file_data')
response = self._mc_client._post(url=self._build_path(), data=data)
if response is not None:
self.file_id = response['id']
else:
self.file_id = None
return response
|
Upload a new image or file to the File Manager.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*,
"file_data": string*
}
|
def get_all_domains(self):
data = self.get_data("domains/")
domains = list()
for jsoned in data['domains']:
domain = Domain(**jsoned)
domain.token = self.token
domains.append(domain)
return domains
|
This function returns a list of Domain objects.
|
def selectlastrow(self, window_name, object_name):
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
cell = object_handle.AXRows[-1]
if not cell.AXSelected:
object_handle.activate()
cell.AXSelected = True
else:
pass
return 1
|
Select last row
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
|
def build(ctx, dput='', opts=''):
with io.open('debian/changelog', encoding='utf-8') as changes:
metadata = re.match(r'^([^ ]+) \(([^)]+)\) ([^;]+); urgency=(.+)$', changes.readline().rstrip())
if not metadata:
notify.failure('Badly formatted top entry in changelog')
name, version, _, _ = metadata.groups()
ctx.run('dpkg-buildpackage {} {}'.format(ctx.rituals.deb.build.opts, opts))
if not os.path.exists('dist'):
os.makedirs('dist')
artifact_pattern = '{}?{}*'.format(name, re.sub(r'[^-_.a-zA-Z0-9]', '?', version))
changes_files = []
for debfile in glob.glob('../' + artifact_pattern):
shutil.move(debfile, 'dist')
if debfile.endswith('.changes'):
changes_files.append(os.path.join('dist', os.path.basename(debfile)))
ctx.run('ls -l dist/{}'.format(artifact_pattern))
if dput:
ctx.run('dput {} {}'.format(dput, ' '.join(changes_files)))
|
Build a DEB package.
|
def saveRecords(self, path='myOutput'):
numRecords = self.fields[0].numRecords
assert (all(field.numRecords==numRecords for field in self.fields))
import csv
with open(path+'.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerow(self.getAllFieldNames())
writer.writerow(self.getAllDataTypes())
writer.writerow(self.getAllFlags())
writer.writerows(self.getAllRecords())
if self.verbosity>0:
print '******', numRecords,'records exported in numenta format to file:',\
path,'******\n'
|
Export all the records into a csv file in numenta format.
Example header format:
fieldName1 fieldName2 fieldName3
date string float
T S
Parameters:
--------------------------------------------------------------------
path: Relative path of the file to which the records are to be exported
|
def copy_arguments_to_annotations(args, type_comment, *, is_method=False):
if isinstance(type_comment, ast3.Ellipsis):
return
expected = len(args.args)
if args.vararg:
expected += 1
expected += len(args.kwonlyargs)
if args.kwarg:
expected += 1
actual = len(type_comment) if isinstance(type_comment, list) else 1
if expected != actual:
if is_method and expected - actual == 1:
pass
else:
raise ValueError(
f"number of arguments in type comment doesn't match; " +
f"expected {expected}, found {actual}"
)
if isinstance(type_comment, list):
next_value = type_comment.pop
else:
_tc = type_comment
def next_value(index: int = 0) -> ast3.expr:
return _tc
for arg in args.args[expected - actual:]:
ensure_no_annotation(arg.annotation)
arg.annotation = next_value(0)
if args.vararg:
ensure_no_annotation(args.vararg.annotation)
args.vararg.annotation = next_value(0)
for arg in args.kwonlyargs:
ensure_no_annotation(arg.annotation)
arg.annotation = next_value(0)
if args.kwarg:
ensure_no_annotation(args.kwarg.annotation)
args.kwarg.annotation = next_value(0)
|
Copies AST nodes from `type_comment` into the ast3.arguments in `args`.
Does validation of argument count (allowing for untyped self/cls)
and type (vararg and kwarg).
|
def find_unique(table, sample, unique_fields=None):
res = search_unique(table, sample, unique_fields)
if res is not None:
return res.eid
else:
return res
|
Search `table` for an item whose `unique_fields` values match those in `sample`.
Check that the obtained result is unique. If nothing is found, None is returned;
if more than one item is found, a MoreThanOneItemError is raised.
Parameters
----------
table: tinydb.table
sample: dict
Sample data
unique_fields: list of str
Name of fields (keys) from `data` which are going to be used to build
a sample to look for exactly the same values in the database.
If None, will use every key in `data`.
Returns
-------
eid: int
Id of the object found with same `unique_fields`.
None if none is found.
Raises
------
MoreThanOneItemError
If more than one example is found.
|
def density_between_circular_annuli_in_angular_units(self, inner_annuli_radius, outer_annuli_radius):
annuli_area = (np.pi * outer_annuli_radius ** 2.0) - (np.pi * inner_annuli_radius ** 2.0)
return (self.mass_within_circle_in_units(radius=outer_annuli_radius) -
self.mass_within_circle_in_units(radius=inner_annuli_radius)) \
/ annuli_area
|
Calculate the mass between two circular annuli and compute the density by dividing by the annuli surface
area.
The value returned by the mass integral is dimensionless, therefore the density between annuli is returned in \
units of inverse radius squared. A conversion factor can be specified to convert this to a physical value \
(e.g. the critical surface mass density).
Parameters
-----------
inner_annuli_radius : float
The radius of the inner annulus outside of which the density are estimated.
outer_annuli_radius : float
The radius of the outer annulus inside of which the density is estimated.
|
def split_multiline(value):
return [element for element in (line.strip() for line in value.split('\n'))
if element]
|
Split a multiline string into a list, excluding blank lines.
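For example, using the function above:
```python
split_multiline("keywords\n  packaging\n\n  setup  \n")
# -> ['keywords', 'packaging', 'setup']
```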
|
def plot_mask(mask, units, kpc_per_arcsec, pointsize, zoom_offset_pixels):
if mask is not None:
plt.gca()
edge_pixels = mask.masked_grid_index_to_pixel[mask.edge_pixels] + 0.5
if zoom_offset_pixels is not None:
edge_pixels -= zoom_offset_pixels
edge_arcsec = mask.grid_pixels_to_grid_arcsec(grid_pixels=edge_pixels)
edge_units = convert_grid_units(array=mask, grid_arcsec=edge_arcsec, units=units,
kpc_per_arcsec=kpc_per_arcsec)
plt.scatter(y=edge_units[:,0], x=edge_units[:,1], s=pointsize, c='k')
|
Plot the mask of the array on the figure.
Parameters
-----------
mask : ndarray of data.array.mask.Mask
The mask applied to the array, the edge of which is plotted as a set of points over the plotted array.
units : str
The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
kpc_per_arcsec : float or None
The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
pointsize : int
The size of the points plotted to show the mask.
|
def _apply_each_methods(self, i, r,
summarize=False,
report_unexpected_exceptions=True,
context=None):
for a in dir(self):
if a.startswith('each'):
rdict = self._as_dict(r)
f = getattr(self, a)
try:
f(rdict)
except Exception as e:
if report_unexpected_exceptions:
p = {'code': UNEXPECTED_EXCEPTION}
if not summarize:
p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)
p['row'] = i + 1
p['record'] = r
p['exception'] = e
p['function'] = '%s: %s' % (f.__name__,
f.__doc__)
if context is not None: p['context'] = context
yield p
|
Invoke 'each' methods on `r`.
|
def sim_crb_diff(std0, std1, N=10000):
a = std0*np.random.randn(N, len(std0))
b = std1*np.random.randn(N, len(std1))
return a - b
|
Each element of `std0` should correspond to the matching element of `std1`.
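A minimal usage sketch with illustrative per-parameter standard deviations, assuming the function above and numpy are in scope.
```python
import numpy as np

std0 = np.array([0.10, 0.25])
std1 = np.array([0.12, 0.20])
diffs = sim_crb_diff(std0, std1, N=1000)
print(diffs.shape)  # (1000, 2)
```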
|
def post_process(self, group, event, is_new, is_sample, **kwargs):
if not self.is_configured(group.project):
return
host = self.get_option('server_host', group.project)
port = int(self.get_option('server_port', group.project))
prefix = self.get_option('prefix', group.project)
hostname = self.get_option('hostname', group.project) or socket.gethostname()
resolve_age = group.project.get_option('sentry:resolve_age', None)
now = int(time.time())
template = '%s.%%s[%s]' % (prefix, group.project.slug)
level = group.get_level_display()
label = template % level
groups = group.project.group_set.filter(status=STATUS_UNRESOLVED)
if resolve_age:
oldest = timezone.now() - timedelta(hours=int(resolve_age))
groups = groups.filter(last_seen__gt=oldest)
num_errors = groups.filter(level=group.level).count()
metric = Metric(hostname, label, num_errors, now)
log.info('will send %s=%s to zabbix', label, num_errors)
send_to_zabbix([metric], host, port)
|
Process error.
|
def _key(cls, obs):
if not isinstance(obs, Observation):
raise JamsError('{} must be of type jams.Observation'.format(obs))
return obs.time
|
Provides sorting index for Observation objects
|
def execute(self, command, timeout=None):
try:
self.channel = self.ssh.get_transport().open_session()
except paramiko.SSHException as e:
self.unknown("Create channel error: %s" % e)
try:
self.channel.settimeout(self.args.timeout if not timeout else timeout)
except socket.timeout as e:
self.unknown("Settimeout for channel error: %s" % e)
try:
self.logger.debug("command: {}".format(command))
self.channel.exec_command(command)
except paramiko.SSHException as e:
self.unknown("Execute command error: %s" % e)
try:
self.stdin = self.channel.makefile('wb', -1)
self.stderr = map(string.strip, self.channel.makefile_stderr('rb', -1).readlines())
self.stdout = map(string.strip, self.channel.makefile('rb', -1).readlines())
except Exception as e:
self.unknown("Get result error: %s" % e)
try:
self.status = self.channel.recv_exit_status()
except paramiko.SSHException as e:
self.unknown("Get return code error: %s" % e)
else:
if self.status != 0:
self.unknown("Return code: %d , stderr: %s" % (self.status, self.errors))
else:
return self.stdout
finally:
self.logger.debug("Execute command finish.")
|
Execute a shell command.
|
def ismounted(device):
with settings(hide('running', 'stdout')):
res = run_as_root('mount')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
with settings(hide('running', 'stdout')):
res = run_as_root('swapon -s')
for line in res.splitlines():
fields = line.split()
if fields[0] == device:
return True
return False
|
Check if partition is mounted
Example::
from burlap.disk import ismounted
if ismounted('/dev/sda1'):
print ("disk sda1 is mounted")
|
def wait_for_readability(self):
with self.lock:
while True:
if self._socket is None or self._eof:
return False
if self._state in ("connected", "closing"):
return True
if self._state == "tls-handshake" and \
self._tls_state == "want_read":
return True
self._state_cond.wait()
|
Stop current thread until the channel is readable.
:Return: `False` if it won't be readable (e.g. is closed)
|
def download(self, source_file, target_folder=''):
current_folder = self._ftp.pwd()
if not target_folder.startswith('/'):
target_folder = join(getcwd(), target_folder)
folder = os.path.dirname(source_file)
self.cd(folder)
if folder.startswith("/"):
folder = folder[1:]
destination_folder = join(target_folder, folder)
if not os.path.exists(destination_folder):
print("Creating folder", destination_folder)
os.makedirs(destination_folder)
source_file = os.path.basename(source_file)
destination = join(destination_folder, source_file)
try:
with open(destination, 'wb') as result:
self._ftp.retrbinary('RETR %s' % (source_file,),
result.write)
except error_perm as e:
print(e)
remove(join(target_folder, source_file))
raise
self._ftp.cwd(current_folder)
|
Downloads a file from the FTP server to target folder
:param source_file: the absolute path for the file on the server
it can be the one of the files coming from
FtpHandler.dir().
:type source_file: string
:param target_folder: relative or absolute path of the
destination folder default is the
working directory.
:type target_folder: string
|
def ma(X, Q, M):
if Q <= 0 or Q >= M:
raise ValueError('Q(MA) must be in ]0,lag[')
a, rho, _c = yulewalker.aryule(X, M, 'biased')
a = np.insert(a, 0, 1)
ma_params, _p, _c = yulewalker.aryule(a, Q, 'biased')
return ma_params, rho
|
Moving average estimator.
This program provides an estimate of the moving average parameters
and driving noise variance for a data sequence based on a
long AR model and a least squares fit.
:param array X: The input data array
:param int Q: Desired MA model order (must be >0 and <M)
:param int M: Order of "long" AR model (suggest at least 2*Q )
:return:
* MA - Array of Q complex MA parameter estimates
* RHO - Real scalar of white noise variance estimate
.. plot::
:width: 80%
:include-source:
from spectrum import arma2psd, ma, marple_data
import pylab
# Estimate 15 Ma parameters
b, rho = ma(marple_data, 15, 30)
# Create the PSD from those MA parameters
psd = arma2psd(B=b, rho=rho, sides='centerdc')
# and finally plot the PSD
pylab.plot(pylab.linspace(-0.5, 0.5, 4096), 10 * pylab.log10(psd/max(psd)))
pylab.axis([-0.5, 0.5, -30, 0])
:reference: [Marple]_
|
def group_nodes_by_annotation_filtered(graph: BELGraph,
node_predicates: NodePredicates = None,
annotation: str = 'Subgraph',
) -> Mapping[str, Set[BaseEntity]]:
node_filter = concatenate_node_predicates(node_predicates)
return {
key: {
node
for node in nodes
if node_filter(graph, node)
}
for key, nodes in group_nodes_by_annotation(graph, annotation).items()
}
|
Group the nodes occurring in edges by the given annotation, with a node filter applied.
:param graph: A BEL graph
:param node_predicates: A predicate or list of predicates (graph, node) -> bool
:param annotation: The annotation to use for grouping
:return: A dictionary of {annotation value: set of nodes}
|
def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
shiftlevel = 0
if level < min_level:
shiftlevel = min_level - level
level = min_level
sections = [(lev+shiftlevel, tit) for lev,tit in sections]
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:], shiftlevel)
if more_info:
return (md, sections)
else:
return "\n".join(md)
|
Convert a docstring to a markdown text.
|
def _sync_content_metadata(self, serialized_data, http_method):
try:
status_code, response_body = getattr(self, '_' + http_method)(
urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path),
serialized_data,
self.CONTENT_PROVIDER_SCOPE
)
except requests.exceptions.RequestException as exc:
raise ClientError(
'DegreedAPIClient request failed: {error} {message}'.format(
error=exc.__class__.__name__,
message=str(exc)
)
)
if status_code >= 400:
raise ClientError(
'DegreedAPIClient request failed with status {status_code}: {message}'.format(
status_code=status_code,
message=response_body
)
)
|
Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails.
|
def create_from_settings(settings):
return Connection(
settings["url"],
settings["base_url"],
settings["user"],
settings["password"],
authorizations = settings["authorizations"],
debug = settings["debug"]
)
|
Create a connection with given settings.
Args:
settings (dict): A dictionary of settings
Returns:
:class:`Connection`. The connection
|
def window_riemann(N):
n = linspace(-N/2., (N)/2., N)
w = sin(n/float(N)*2.*pi) / (n / float(N)*2.*pi)
return w
|
r"""Riemann tapering window
:param int N: window length
.. math:: w(n) = 1 - \left| \frac{n}{N/2} \right|^2
with :math:`-N/2 \leq n \leq N/2`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'riesz')
.. seealso:: :func:`create_window`, :class:`Window`
|
def prefetch_users(persistent_course_grades):
users = User.objects.filter(
id__in=[grade.user_id for grade in persistent_course_grades]
)
return {
user.id: user for user in users
}
|
Prefetch Users from the list of user_ids present in the persistent_course_grades.
Arguments:
persistent_course_grades (list): A list of PersistentCourseGrade.
Returns:
(dict): A dictionary containing user_id to user mapping.
|
def subtract_metabolites(self, metabolites, combine=True, reversibly=True):
self.add_metabolites({
k: -v for k, v in iteritems(metabolites)},
combine=combine, reversibly=reversibly)
|
Subtract metabolites from a reaction.
That means add the metabolites with -1*coefficient. If the final
coefficient for a metabolite is 0 then the metabolite is removed from
the reaction.
Notes
-----
* A final coefficient < 0 implies a reactant.
* The change is reverted upon exit when using the model as a context.
Parameters
----------
metabolites : dict
Dictionary where the keys are of class Metabolite and the values
are the coefficients. These metabolites will be added to the
reaction.
combine : bool
Describes the behavior when a metabolite already exists in the reaction.
True causes the coefficients to be added.
False causes the coefficient to be replaced.
reversibly : bool
Whether to add the change to the context to make the change
reversibly or not (primarily intended for internal use).
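A short cobrapy sketch of the behaviour described above; identifiers are illustrative.
```python
from cobra import Metabolite, Reaction

atp = Metabolite('atp_c')
adp = Metabolite('adp_c')
rxn = Reaction('demo')
rxn.add_metabolites({atp: -1.0, adp: 1.0})

rxn.subtract_metabolites({adp: 1.0})   # adp_c drops out; atp_c keeps coefficient -1.0
print(rxn.metabolites)                 # {<Metabolite atp_c ...>: -1.0}
```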
|
def extend_model(self, exchange_reactions=False, demand_reactions=True):
for rxn in self.universal.reactions:
rxn.gapfilling_type = 'universal'
new_metabolites = self.universal.metabolites.query(
lambda metabolite: metabolite not in self.model.metabolites
)
self.model.add_metabolites(new_metabolites)
existing_exchanges = []
for rxn in self.universal.boundary:
existing_exchanges = existing_exchanges + \
[met.id for met in list(rxn.metabolites)]
for met in self.model.metabolites:
if exchange_reactions:
if met.id not in existing_exchanges:
rxn = self.universal.add_boundary(
met, type='exchange_smiley', lb=-1000, ub=0,
reaction_id='EX_{}'.format(met.id))
rxn.gapfilling_type = 'exchange'
if demand_reactions:
rxn = self.universal.add_boundary(
met, type='demand_smiley', lb=0, ub=1000,
reaction_id='DM_{}'.format(met.id))
rxn.gapfilling_type = 'demand'
new_reactions = self.universal.reactions.query(
lambda reaction: reaction not in self.model.reactions
)
self.model.add_reactions(new_reactions)
|
Extend gapfilling model.
Add reactions from universal model and optionally exchange and
demand reactions for all metabolites in the model to perform
gapfilling on.
Parameters
----------
exchange_reactions : bool
Consider adding exchange (uptake) reactions for all metabolites
in the model.
demand_reactions : bool
Consider adding demand reactions for all metabolites.
|
def _read_all_z_variable_info(self):
self.z_variable_info = {}
self.z_variable_names_by_num = {}
info = fortran_cdf.z_var_all_inquire(self.fname, self._num_z_vars,
len(self.fname))
status = info[0]
data_types = info[1]
num_elems = info[2]
rec_varys = info[3]
dim_varys = info[4]
num_dims = info[5]
dim_sizes = info[6]
rec_nums = info[7]
var_nums = info[8]
var_names = info[9]
if status == 0:
for i in np.arange(len(data_types)):
out = {}
out['data_type'] = data_types[i]
out['num_elems'] = num_elems[i]
out['rec_vary'] = rec_varys[i]
out['dim_varys'] = dim_varys[i]
out['num_dims'] = num_dims[i]
out['dim_sizes'] = dim_sizes[i, :1]
if out['dim_sizes'][0] == 0:
out['dim_sizes'][0] += 1
out['rec_num'] = rec_nums[i]
out['var_num'] = var_nums[i]
var_name = ''.join(var_names[i].astype('U'))
out['var_name'] = var_name.rstrip()
self.z_variable_info[out['var_name']] = out
self.z_variable_names_by_num[out['var_num']] = var_name
else:
raise IOError(fortran_cdf.statusreporter(status))
|
Gets all CDF z-variable information, not data though.
Maps to calls using var_inquire. Gets information on
data type, number of elements, number of dimensions, etc.
|
def build_delete_node_by_hash(manager: Manager) -> Callable[[BELGraph, str], None]:
@in_place_transformation
def delete_node_by_hash(graph: BELGraph, node_hash: str) -> None:
node = manager.get_dsl_by_hash(node_hash)
graph.remove_node(node)
return delete_node_by_hash
|
Make a delete function that's bound to the manager.
|
def bond_task(
perc_graph_result, seeds, ps, convolution_factors_tasks_iterator
):
convolution_factors_tasks = list(convolution_factors_tasks_iterator)
return reduce(
percolate.hpc.bond_reduce,
map(
bond_run,
itertools.repeat(perc_graph_result),
seeds,
itertools.repeat(ps),
itertools.repeat(convolution_factors_tasks),
)
)
|
Perform a number of runs.
The number of runs is the number of seeds.
convolution_factors_tasks_iterator needs to be an iterator
We shield the convolution factors tasks from jug value/result mechanism
by supplying an iterator to the list of tasks for lazy evaluation
http://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L100
http://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L455
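A self-contained sketch of the same map/reduce shape (a shared argument is
repeated with itertools.repeat, per-seed work is mapped, and the results are
folded with reduce); run and combine are stand-ins, not the percolate API:
    import itertools
    from functools import reduce

    def run(shared, seed, ps):
        return {p: seed * p for p in ps}      # stand-in for a single bond run

    def combine(a, b):
        return {p: a[p] + b[p] for p in a}    # stand-in for bond_reduce

    ps = [0.3, 0.5]
    result = reduce(combine,
                    map(run, itertools.repeat('graph'), range(4),
                        itertools.repeat(ps)))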
|
def vagrant(self, name=''):
r = self.local_renderer
config = self.ssh_config(name)
extra_args = self._settings_dict(config)
r.genv.update(extra_args)
|
Run the following tasks on a vagrant box.
First, you need to import this task in your ``fabfile.py``::
from fabric.api import *
from burlap.vagrant import vagrant
@task
def some_task():
run('echo hello')
Then you can easily run tasks on your current Vagrant box::
$ fab vagrant some_task
|
def BSWAP(cpu, dest):
parts = []
arg0 = dest.read()
for i in range(0, dest.size, 8):
parts.append(Operators.EXTRACT(arg0, i, 8))
dest.write(Operators.CONCAT(8 * len(parts), *parts))
|
Byte swap.
Reverses the byte order of a 32-bit (destination) register: bits 0 through
7 are swapped with bits 24 through 31, and bits 8 through 15 are swapped
with bits 16 through 23. This instruction is provided for converting little-endian
values to big-endian format and vice versa.
To swap bytes in a word value (16-bit register), use the XCHG instruction.
When the BSWAP instruction references a 16-bit register, the result is
undefined::
TEMP = DEST
DEST[7..0] = TEMP[31..24]
DEST[15..8] = TEMP[23..16]
DEST[23..16] = TEMP[15..8]
DEST[31..24] = TEMP[7..0]
:param cpu: current CPU.
:param dest: destination operand.
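For reference, a quick self-contained Python check of the same 32-bit swap
semantics (plain integers, independent of the symbolic Operators API)::
    def bswap32(value):
        parts = [(value >> shift) & 0xFF for shift in range(0, 32, 8)]  # low byte first
        result = 0
        for byte in parts:   # the low byte ends up most significant, as in the pseudocode
            result = (result << 8) | byte
        return result
    assert bswap32(0x12345678) == 0x78563412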
|
def is_displayed(target):
is_displayed = getattr(target, 'is_displayed', None)
if not is_displayed or not callable(is_displayed):
raise TypeError('Target has no attribute \'is_displayed\' or not callable')
if not is_displayed():
raise WebDriverException('element not visible')
|
Assert that the target is displayed.
Args:
target(WebElement): WebElement object.
Raises:
TypeError: If the target has no callable ``is_displayed`` attribute.
WebDriverException: If the element is not displayed.
|
def sendToSbs(self, challenge_id, item_id):
method = 'PUT'
url = 'sbs/challenge/%s/squad' % challenge_id
squad = self.sbsSquad(challenge_id)
players = []
moved = False
n = 0
for i in squad['squad']['players']:
if i['itemData']['id'] == item_id:
return False
if i['itemData']['id'] == 0 and not moved:
i['itemData']['id'] = item_id
moved = True
players.append({"index": n,
"itemData": {"id": i['itemData']['id'],
"dream": False}})
n += 1
data = {'players': players}
if not moved:
return False
else:
self.__request__(method, url, data=json.dumps(data))
return True
|
Send card FROM CLUB to first free slot in sbs squad.
|
def load_state(self, state_id, delete=True):
return self._store.load_state(f'{self._prefix}{state_id:08x}{self._suffix}', delete=delete)
|
Load a state from storage identified by `state_id`.
:param state_id: The state reference of what to load
:param delete: Whether to delete the stored state after loading it (defaults to True)
:return: The deserialized state
:rtype: State
|
def set_fields(self):
if self.is_initialized:
self.model_map_dict = self.create_document_dictionary(self.model_instance)
else:
self.model_map_dict = self.create_document_dictionary(self.model)
form_field_dict = self.get_form_field_dict(self.model_map_dict)
self.set_form_fields(form_field_dict)
|
Sets existing data on the form fields.
|
def _auto_client_files(cls, client, ca_path=None, ca_contents=None, cert_path=None,
cert_contents=None, key_path=None, key_contents=None):
files = []
if ca_path and ca_contents:
client['ca'] = ca_path
files.append(dict(path=ca_path,
contents=ca_contents,
mode=DEFAULT_FILE_MODE))
if cert_path and cert_contents:
client['cert'] = cert_path
files.append(dict(path=cert_path,
contents=cert_contents,
mode=DEFAULT_FILE_MODE))
if key_path and key_contents:
client['key'] = key_path
files.append(dict(path=key_path,
contents=key_contents,
mode=DEFAULT_FILE_MODE,))
return files
|
Returns a list of NetJSON extra files for automatically generated clients.
Produces side effects in the ``client`` dictionary.
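Illustrative call (VpnBackend, the paths, and the PEM contents are hypothetical;
shown only to make the side effect on ``client`` concrete):
    client = {}
    files = VpnBackend._auto_client_files(
        client,
        ca_path='/etc/x509/ca.pem',
        ca_contents='-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----')
    # files  -> [{'path': '/etc/x509/ca.pem', 'contents': '...', 'mode': DEFAULT_FILE_MODE}]
    # client -> {'ca': '/etc/x509/ca.pem'}   (mutated in place)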
|
def _build_query(self, query_string, no_params=False):
try:
query = quote(query_string.format(u=self.library_id, t=self.library_type))
except KeyError as err:
raise ze.ParamNotPassed("There's a request parameter missing: %s" % err)
if no_params is False:
if not self.url_params:
self.add_parameters()
query = "%s?%s" % (query, self.url_params)
return query
|
Set request parameters. Will always add the user ID if it hasn't
been specifically set by an API method
|
def add(self, stream_id, task_ids, grouping, source_comp_name):
if stream_id not in self.targets:
self.targets[stream_id] = []
self.targets[stream_id].append(Target(task_ids, grouping, source_comp_name))
|
Adds the target component
:type stream_id: str
:param stream_id: stream id into which tuples are emitted
:type task_ids: list of str
:param task_ids: list of task ids to which tuples are emitted
:type grouping: ICustomStreamGrouping object
:param grouping: custom grouping to use
:type source_comp_name: str
:param source_comp_name: source component name
|
def save_ckpt(
sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, global_step=None, printable=False
):
if sess is None:
raise ValueError("session is None.")
if var_list is None:
var_list = []
ckpt_file = os.path.join(save_dir, mode_name)
if var_list == []:
var_list = tf.global_variables()
logging.info("[*] save %s n_params: %d" % (ckpt_file, len(var_list)))
if printable:
for idx, v in enumerate(var_list):
logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape())))
saver = tf.train.Saver(var_list)
saver.save(sess, ckpt_file, global_step=global_step)
|
Save parameters into `ckpt` file.
Parameters
------------
sess : Session
TensorFlow Session.
mode_name : str
The name of the model, default is ``model.ckpt``.
save_dir : str
The path / file directory to the `ckpt`, default is ``checkpoint``.
var_list : list of tensor
The parameters / variables (tensor) to be saved. If empty, save all global variables (default).
global_step : int or None
Step number.
printable : boolean
Whether to print all parameters information.
See Also
--------
load_ckpt
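Examples
--------
A minimal sketch assuming TensorFlow 1.x graph/session semantics (as used by
this function); the variable and paths are placeholders:
    import os
    import tensorflow as tf
    w = tf.Variable(tf.zeros([10, 10]), name='w')
    os.makedirs('checkpoint', exist_ok=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        save_ckpt(sess=sess, mode_name='model.ckpt', save_dir='checkpoint',
                  var_list=[], global_step=1, printable=False)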
|
def _filter(filterObj, **kwargs):
for key, value in kwargs.items():
if key.endswith('__ne'):
notFilter = True
key = key[:-4]
else:
notFilter = False
if key not in filterObj.indexedFields:
raise ValueError('Field "' + key + '" is not in INDEXED_FIELDS array. Filtering is only supported on indexed fields.')
if notFilter is False:
filterObj.filters.append( (key, value) )
else:
filterObj.notFilters.append( (key, value) )
return filterObj
|
Internal for handling filters; the guts of .filter and .filterInline
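Self-contained illustration of the ``__ne`` keyword convention handled above
(plain lists stand in for the filter object; the field names are hypothetical):
    def split_filters(**kwargs):
        filters, not_filters = [], []
        for key, value in kwargs.items():
            if key.endswith('__ne'):
                not_filters.append((key[:-4], value))
            else:
                filters.append((key, value))
        return filters, not_filters
    # split_filters(name='alice', status__ne='archived')
    # -> ([('name', 'alice')], [('status', 'archived')])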
|
def new_address(self, label=None):
return self._backend.new_address(account=self.index, label=label)
|
Creates a new address.
:param label: address label as `str`
:rtype: :class:`SubAddress <monero.address.SubAddress>`
|
def set_embedded_doc(self, document, form_key, current_key, remaining_key):
embedded_doc = getattr(document, current_key, False)
if not embedded_doc:
embedded_doc = document._fields[current_key].document_type_obj()
new_key, new_remaining_key_array = trim_field_key(embedded_doc, remaining_key)
self.process_document(embedded_doc, form_key, make_key(new_key, new_remaining_key_array))
setattr(document, current_key, embedded_doc)
|
Get the existing embedded document if it exists, else create it.
|
def fasta_dict_to_file(fasta_dict, fasta_file, line_char_limit=None):
fasta_fp = fasta_file
if isinstance(fasta_file, str):
fasta_fp = open(fasta_file, 'wb')
for key in fasta_dict:
seq = fasta_dict[key]['seq']
if line_char_limit:
seq = '\n'.join([seq[i:i+line_char_limit] for i in range(0, len(seq), line_char_limit)])
fasta_fp.write(u'{0:s}\n{1:s}\n'.format(fasta_dict[key]['header'], seq))
|
Write fasta_dict to fasta_file
:param fasta_dict: returned by fasta_file_to_dict
:param fasta_file: output file can be a string path or a file object
:param line_char_limit: None = no limit (default)
:return: None
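Example (illustrative data; the dict layout mirrors what ``fasta_file_to_dict``
is described as returning, and a pre-opened text handle is passed as the output):
    fasta_dict = {'seq1': {'header': '>seq1 example record', 'seq': 'ACGTACGTACGT'}}
    with open('out.fasta', 'w') as handle:
        fasta_dict_to_file(fasta_dict, handle, line_char_limit=8)
    # out.fasta:
    # >seq1 example record
    # ACGTACGT
    # ACGT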
|
def get_varfeatures(simbasedir,
mindet=1000,
nworkers=None):
with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
siminfo = pickle.load(infd)
lcfpaths = siminfo['lcfpath']
varfeaturedir = os.path.join(simbasedir,'varfeatures')
timecols = siminfo['timecols']
magcols = siminfo['magcols']
errcols = siminfo['errcols']
fakelc_formatkey = 'fake-%s' % siminfo['lcformat']
lcproc.register_lcformat(
fakelc_formatkey,
'*-fakelc.pkl',
timecols,
magcols,
errcols,
'astrobase.lcproc',
'_read_pklc',
magsarefluxes=siminfo['magsarefluxes']
)
varinfo = lcvfeatures.parallel_varfeatures(lcfpaths,
varfeaturedir,
lcformat=fakelc_formatkey,
mindet=mindet,
nworkers=nworkers)
with open(os.path.join(simbasedir,'fakelc-varfeatures.pkl'),'wb') as outfd:
pickle.dump(varinfo, outfd, pickle.HIGHEST_PROTOCOL)
return os.path.join(simbasedir,'fakelc-varfeatures.pkl')
|
This runs `lcproc.lcvfeatures.parallel_varfeatures` on fake LCs in
`simbasedir`.
Parameters
----------
simbasedir : str
The directory containing the fake LCs to process.
mindet : int
The minimum number of detections needed to accept an LC and process it.
nworkers : int or None
The number of parallel workers to use when extracting variability
features from the input light curves.
Returns
-------
str
The path to the `varfeatures` pickle created after running the
`lcproc.lcvfeatures.parallel_varfeatures` function.
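Examples
--------
A minimal usage sketch; the path is a placeholder for a `simbasedir` created by
the fake-LC generation step:
    varfeatures_pkl = get_varfeatures('/path/to/fakelcs-simbasedir',
                                      mindet=1000,
                                      nworkers=4)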
|
def delete(self, pk=None, fail_on_missing=False, **kwargs):
if not pk:
existing_data = self._lookup(fail_on_missing=fail_on_missing, **kwargs)
if not existing_data:
return {'changed': False}
pk = existing_data['id']
url = '%s%s/' % (self.endpoint, pk)
debug.log('DELETE %s' % url, fg='blue', bold=True)
try:
client.delete(url)
return {'changed': True}
except exc.NotFound:
if fail_on_missing:
raise
return {'changed': False}
|
Remove the given object.
If `fail_on_missing` is True, then the object's not being found is considered a failure; otherwise,
a success with no change is reported.
=====API DOCS=====
Remove the given object.
:param pk: Primary key of the resource to be deleted.
:type pk: int
:param fail_on_missing: Flag that if set, the object's not being found is considered a failure; otherwise,
a success with no change is reported.
:type fail_on_missing: bool
:param `**kwargs`: Keyword arguments used to look up resource object to delete if ``pk`` is not provided.
:returns: dictionary of only one field "changed", which is a flag indicating whether the specified resource
is successfully deleted.
:rtype: dict
=====API DOCS=====
|
def MessageSetItemDecoder(extensions_by_number):
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
pos = SkipField(buffer, pos, end, tag_bytes)
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = extensions_by_number.get(type_id)
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(
extension, extension.message_type._concrete_class())
if value._InternalParse(buffer, message_start,message_end) != message_end:
raise _DecodeError('Unexpected end-group tag.')
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,
buffer[message_set_item_start:pos]))
return pos
return DecodeItem
|
Returns a decoder for a MessageSet item.
The parameter is the _extensions_by_number map for the message class.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
|