code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def remover(self, id_brand):
    """Delete a Brand on the server by its identifier.

    :param id_brand: Identifier of the Brand; positive integer.
    :return: Parsed response of the DELETE request.
    :raise InvalidParameterError: if ``id_brand`` is missing or invalid.
    """
    if not is_valid_int_param(id_brand):
        raise InvalidParameterError(
            u'The identifier of Brand is invalid or was not informed.')
    # Build the resource URL and issue the DELETE through the base client.
    target = 'brand/' + str(id_brand) + '/'
    status, payload = self.submit(None, 'DELETE', target)
    return self.response(status, payload)
Remove a Brand by its identifier. :param id_brand: Identifier of the Brand. Integer value and greater than zero. :return: None :raise InvalidParameterError: The identifier of Brand is null or invalid. :raise MarcaNaoExisteError: Brand not registered. :raise MarcaError: The brand is associated with a model. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
def unit(self, name: Optional[UnitName] = None, symbol=False):
    """Return the unit name (or its symbol) for a ``UnitName`` member.

    :param name: ``UnitName`` enum member to look up.
    :param symbol: When true, return the unit's symbol instead of its name.
    :return: Unit name or symbol.
    """
    validated = self._validate_enum(item=name, enum=UnitName)
    # Index 0 holds the unit name, index 1 its symbol.
    return validated[1] if symbol else validated[0]
Get unit name. :param name: Enum object UnitName. :param symbol: Return only symbol :return: Unit.
def __read_byte_offset(decl, attrs):
    """Set ``decl.byte_offset`` from the XML ``offset`` attribute.

    Duck typing: the attribute is set on the declaration directly instead
    of through its constructor.

    :param decl: declaration object that receives ``byte_offset``.
    :param attrs: XML attribute mapping; ``offset`` defaults to 0.
    """
    bit_offset = attrs.get(XML_AN_OFFSET, 0)
    # The XML stores the offset in bits.  Use floor division: plain ``/``
    # yields a float on Python 3, but byte_offset should be an int.
    decl.byte_offset = int(bit_offset) // 8
Using duck typing to set the offset instead of in constructor
def add_table(self, t):
    """Append table ``t`` to the current page and make it the active element.

    Remember to call ``pop_element`` once you are done with the table.
    """
    self.push_element()
    table_node = t.node
    self._page.append(table_node)
    self.cur_element = t
remember to call pop_element after done with table
def complex_median(complex_list):
    """Get the component-wise median of a list of complex numbers.

    Parameters
    ----------
    complex_list: list
        Complex numbers to take the median of.

    Returns
    -------
    complex
        ``median(real parts) + 1j * median(imaginary parts)``.
    """
    reals = [z.real for z in complex_list]
    imags = [z.imag for z in complex_list]
    return numpy.median(reals) + 1.j * numpy.median(imags)
Get the median value of a list of complex numbers. Parameters ---------- complex_list: list List of complex numbers to calculate the median. Returns ------- a + 1.j*b: complex number The median of the real and imaginary parts.
def remove_pos_arg_placeholders(alias_command):
    """Strip positional-argument placeholders from an alias command.

    Args:
        alias_command: The alias command to remove placeholders from.

    Returns:
        The lowercased alias command without positional placeholders.
    """
    tokens = shlex.split(alias_command)
    # Keep leading alphabetic subcommands up to the collision-check depth;
    # the first non-matching token marks where the placeholders start.
    cutoff = len(tokens)
    for index, token in enumerate(tokens):
        if not re.match('^[a-z]', token.lower()) or index > COLLISION_CHECK_LEVEL_DEPTH:
            cutoff = index
            break
    return ' '.join(tokens[:cutoff]).lower()
Remove positional argument placeholders from alias_command. Args: alias_command: The alias command to remove from. Returns: The alias command string without positional argument placeholder.
def altshuler_grun(v, v0, gamma0, gamma_inf, beta):
    """Calculate the Gruneisen parameter for the Altshuler equation.

    :param v: unit-cell volume in A^3
    :param v0: unit-cell volume in A^3 at 1 bar
    :param gamma0: Gruneisen parameter at 1 bar
    :param gamma_inf: Gruneisen parameter at infinite pressure
    :param beta: volume dependence of the Gruneisen parameter
    :return: Gruneisen parameter at volume ``v``
    """
    ratio = v / v0
    # Interpolates between gamma0 (ratio == 1) and gamma_inf (ratio -> 0).
    return gamma_inf + (gamma0 - gamma_inf) * np.power(ratio, beta)
calculate Gruneisen parameter for Altshuler equation :param v: unit-cell volume in A^3 :param v0: unit-cell volume in A^3 at 1 bar :param gamma0: Gruneisen parameter at 1 bar :param gamma_inf: Gruneisen parameter at infinite pressure :param beta: volume dependence of Gruneisen parameter :return: Gruneisen parameter
def body_block_attribution(tag):
    "extract the attribution content for figures, tables, videos"
    attributions = []
    # Plain <attrib> tags are used verbatim.
    for attrib_tag in raw_parser.attrib(tag) or []:
        attributions.append(node_contents_str(attrib_tag))
    # Permissions blocks are flattened into one sentence-joined string:
    # copyright statement first, then each licence paragraph.
    for permissions_tag in raw_parser.permissions(tag) or []:
        attrib_string = join_sentences(
            '',
            node_contents_str(raw_parser.copyright_statement(permissions_tag)),
            '.')
        for licence_p_tag in raw_parser.licence_p(permissions_tag) or []:
            attrib_string = join_sentences(
                attrib_string, node_contents_str(licence_p_tag), '.')
        if attrib_string != '':
            attributions.append(attrib_string)
    return attributions
extract the attribution content for figures, tables, videos
def area(self):
    """The surface area of the primitive extrusion.

    Calculated from polygon and height to avoid mesh creation.

    Returns
    ----------
    area: float, surface area of the 3D extrusion
    """
    polygon = self.primitive.polygon
    # Lateral surface: perimeter times height; plus both end caps.
    lateral = abs(self.primitive.height * polygon.length)
    return lateral + polygon.area * 2
The surface area of the primitive extrusion. Calculated from polygon and height to avoid mesh creation. Returns ---------- area: float, surface area of 3D extrusion
def create_a10_device_instance(self, context, a10_device_instance):
    """Attempt to create a vThunder device instance using the neutron context.

    Merges configured vthunder defaults with the API request body, launches
    the instance, then persists a combined DB record.
    """
    LOG.debug("A10DeviceInstancePlugin.create(): a10_device_instance=%s",
              a10_device_instance)
    config = a10_config.A10Config()
    defaults = config.get_vthunder_config()
    manager = instance_manager.InstanceManager.from_config(config, context)
    request_body = common_resources.remove_attributes_not_specified(
        a10_device_instance.get(resources.RESOURCE))
    # Defaults first; API-supplied values take precedence.
    launch_config = defaults.copy()
    launch_config.update(_convert(request_body, _API, _VTHUNDER_CONFIG))
    instance = manager.create_device_instance(
        launch_config, request_body.get("name"))
    # The DB row aggregates config, request, and runtime instance data.
    db_record = {}
    db_record.update(_convert(launch_config, _VTHUNDER_CONFIG, _DB))
    db_record.update(_convert(request_body, _API, _DB))
    db_record.update(_convert(instance, _INSTANCE, _DB))
    db_instance = super(A10DeviceInstancePlugin, self).create_a10_device_instance(
        context, {resources.RESOURCE: db_record})
    return _make_api_dict(db_instance)
Attempt to create instance using neutron context
def C00_(self):
    """Instantaneous covariance matrix C(0,0).

    Requires the estimator to have been fit (checked via
    ``_check_estimated``).
    """
    self._check_estimated()
    # Delegate to the running-covariance helper, honoring the configured
    # Bessel correction.
    return self._rc.cov_XX(bessel=self.bessel)
Instantaneous covariance matrix
def convertDate(self, date, prefix="", weekday=False):
    """Convert a datetime into a human-readable "day at time" string.

    Runs both ``convertDay`` and ``convertTime`` on the input and merges
    the results.

    Args:
        date (datetime.date): Datetime to convert to text.
        prefix (str): Optional prefix passed through to ``convertDay``.
        weekday (bool): When True, include the weekday name in the day part.

    Returns:
        A string representation of the input day and time.
    """
    day_part = self.convertDay(date, prefix=prefix, weekday=weekday)
    time_part = self.convertTime(date)
    return day_part + " at " + time_part
Convert a datetime object representing into a human-ready string that can be read, spoken aloud, etc. In effect, runs both convertDay and convertTime on the input, merging the results. Args: date (datetime.date): A datetime object to be converted into text. prefix (str): An optional argument that prefixes the converted string. For example, if prefix="in", you'd receive "in two days", rather than "two days", while the method would still return "tomorrow" (rather than "in tomorrow"). weekday (bool): An optional argument that returns "Monday, Oct. 1" if True, rather than "Oct. 1". Returns: A string representation of the input day and time.
def find_author(self):
    """Get the author information from the version control system.

    :return: An :class:`Author` built from ``git config user.name`` and
        ``user.email``; lookups are silent and failures are tolerated.
    """
    def git_config(key):
        # Best effort: check=False means a missing setting just yields
        # empty output instead of raising.
        return self.context.capture('git', 'config', key,
                                    check=False, silent=True)

    return Author(name=git_config('user.name'),
                  email=git_config('user.email'))
Get the author information from the version control system.
def _HasAccessToClient(self, subject, token):
    """Checks if the user has access to the client under the given URN."""
    # The first path component of the subject URN is the client id.
    client_id, _ = rdfvalue.RDFURN(subject).Split(2)
    return self.CheckClientAccess(token, rdf_client.ClientURN(client_id))
Checks if user has access to a client under given URN.
def getAnalysisServicesDisplayList(self):
    """Return a DisplayList with the active Analysis Services.

    The value is the service keyword and the title is the text displayed.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    items = [('', '')] + [(o.getObject().Keyword, o.Title)
                          for o in bsc(portal_type='AnalysisService',
                                       is_active=True)]
    # Sort by title, case-insensitively.  Using ``key=`` instead of a
    # cmp-style comparison function keeps this working on Python 3,
    # where ``list.sort`` no longer accepts a comparator.
    items.sort(key=lambda item: item[1].lower())
    return DisplayList(list(items))
Returns a Display List with the active Analysis Services available. The value is the keyword and the title is the text to be displayed.
def FetchDiscoveryDoc(discovery_url, retries=5):
    """Fetch the discovery document at the given URL.

    :param discovery_url: URL (or shorthand) normalized into one or more
        candidate URLs.
    :param retries: Number of attempts per candidate URL.
    :return: The discovery document parsed from JSON.
    :raise CommunicationError: if no candidate URL yielded a document.
    """
    discovery_urls = _NormalizeDiscoveryUrls(discovery_url)
    discovery_doc = None
    last_exception = None
    for url in discovery_urls:
        for _ in range(retries):
            try:
                content = _GetURLContent(url)
                if isinstance(content, bytes):
                    content = content.decode('utf8')
                discovery_doc = json.loads(content)
                break
            except (urllib_error.HTTPError, urllib_error.URLError) as e:
                logging.info(
                    'Attempting to fetch discovery doc again after "%s"', e)
                last_exception = e
        # Bug fix: stop at the first URL that produced a document instead
        # of continuing to fetch from (and possibly overwrite it with)
        # the remaining candidate URLs.
        if discovery_doc is not None:
            break
    if discovery_doc is None:
        raise CommunicationError(
            'Could not find discovery doc at any of %s: %s' % (
                discovery_urls, last_exception))
    return discovery_doc
Fetch the discovery document at the given url.
def int_to_rgba(cls, rgba_int):
    """Convert a packed color integer into an (r, g, b, a) tuple.

    The integer packs the channels as 0xRRGGBBAA.

    :param rgba_int: Packed color value, or None.
    :return: (red, green, blue, alpha) ints, or (None, None, None, None).
    """
    if rgba_int is None:
        return None, None, None, None
    # Use floor division so each channel stays an int; plain ``/`` would
    # produce floats on Python 3.
    alpha = rgba_int % 256
    blue = rgba_int // 256 % 256
    green = rgba_int // 256 // 256 % 256
    red = rgba_int // 256 // 256 // 256 % 256
    return (red, green, blue, alpha)
Converts a color Integer into r, g, b, a tuple.
def get_paginated_response(self, data):
    """Annotate the response with pagination information."""
    page = self.page
    page_size = self.get_page_size(self.request)
    return Response({
        'next': self.get_next_link(),
        'previous': self.get_previous_link(),
        'count': page.paginator.count,
        'num_pages': page.paginator.num_pages,
        'current_page': page.number,
        # Zero-based index of the first item on this page.
        'start': (page.number - 1) * page_size,
        'results': data,
    })
Annotate the response with pagination information.
def copy(self):
    """Returns a copy of the RigidTransform.

    Returns
    -------
    :obj:`RigidTransform`
        A new transform with copied rotation/translation arrays and the
        same frame labels.
    """
    rotation = np.copy(self.rotation)
    translation = np.copy(self.translation)
    return RigidTransform(rotation, translation,
                          self.from_frame, self.to_frame)
Returns a copy of the RigidTransform. Returns ------- :obj:`RigidTransform` A deep copy of the RigidTransform.
def parse_coach_go(infile):
    """Parse a GO prediction file from COACH.

    Each line holds: GO term ID, confidence score, then the GO term text.
    Applies to GO_MF.dat, GO_BP.dat and GO_CC.dat alike.

    Args:
        infile (str): Path to any COACH GO prediction file.

    Returns:
        list: Rank-ordered dicts with keys ``go_id``, ``c_score`` and
        ``go_term``.
    """
    predictions = []
    with open(infile) as handle:
        for raw_line in handle:
            fields = raw_line.split()
            predictions.append({
                'go_id': fields[0],
                'c_score': fields[1],
                # Everything after the score is the (multi-word) term text.
                'go_term': ' '.join(fields[2:]),
            })
    return predictions
Parse a GO output file from COACH and return a rank-ordered list of GO term predictions The columns in all files are: GO terms, Confidence score, Name of GO terms. The files are: - GO_MF.dat - GO terms in 'molecular function' - GO_BP.dat - GO terms in 'biological process' - GO_CC.dat - GO terms in 'cellular component' Args: infile (str): Path to any COACH GO prediction file Returns: Pandas DataFrame: Organized dataframe of results, columns defined below - ``go_id``: GO term ID - ``go_term``: GO term text - ``c_score``: confidence score of the GO prediction
def main():
    """Entry point for stand-alone execution."""
    conf.init()
    db.init(conf.DbPath)
    inqueue = LineQueue(sys.stdin).queue
    if "--quiet" in sys.argv:
        outqueue = None
    else:
        # Minimal sink that echoes each line in place on the console.
        outqueue = type("", (), {"put": lambda self, x: print("\r%s" % x, end=" ")})()
    if conf.MouseEnabled:
        inqueue.put("mouse_start")
    if conf.KeyboardEnabled:
        inqueue.put("keyboard_start")
    start(inqueue, outqueue)
Entry point for stand-alone execution.
def get_simple_date(datestring):
    """Transform a datestring into a shorter date: 7.9.2017 -> 07.09.

    Expects the datestring to contain a date like ``d.M`` or ``dd.MM``;
    otherwise returns the string "Failed".

    Keyword arguments:
    datestring -- a string

    Returns: String -- the date in format "dd.MM." or "Failed"
    """
    match = re.search(r"\d{1,2}(\.)\d{1,2}", datestring)
    if match:
        day, month = match.group().split(".")
        # Pad single-digit day/month components to two digits.
        if len(day) == 1:
            day = add_zero(day)
        if len(month) == 1:
            month = add_zero(month)
        if date_is_valid([day, month]):
            return '.'.join([day, month]) + '.'
    return "Failed"
Transforms a datestring into shorter date 7.9.2017 > 07.09 Expects the datestring to be format 07.09.2017. If this is not the case, returns string "Failed". Keyword arguments: datestring -- a string Returns: String -- The date in format "dd.MM." or "Failed"
def add_lb_nodes(self, lb_id, nodes):
    """Adds nodes to an existing LBaaS instance.

    :param string lb_id: Balancer id
    :param list nodes: Nodes to add, each ``{address, port, [condition]}``
    :rtype: :class:`list`
    """
    log.info("Adding load balancer nodes %s" % nodes)
    # The status portion of the response is not needed; return the body.
    _, body = self._request(
        'post', '/loadbalancers/%s/nodes' % lb_id, data={'nodes': nodes})
    return body
Adds nodes to an existing LBaaS instance :param string lb_id: Balancer id :param list nodes: Nodes to add. {address, port, [condition]} :rtype :class:`list`
def get_least_common_subsumer(self, from_tid, to_tid):
    """Returns the deepest common subsumer among two terms.

    @type from_tid: string
    @param from_tid: one term id
    @type to_tid: string
    @param to_tid: another term id
    @rtype: string
    @return: the identifier of the common subsumer, or None if there is none
    """
    terminal_a = self.terminal_for_term.get(from_tid)
    terminal_b = self.terminal_for_term.get(to_tid)
    path_a = self.paths_for_terminal[terminal_a][0]
    path_b = self.paths_for_terminal[terminal_b][0]
    shared = set(path_a) & set(path_b)
    if not shared:
        return None
    # The deepest subsumer minimizes the combined distance from both terms.
    return min(shared,
               key=lambda node: path_a.index(node) + path_b.index(node))
Returns the deepest common subsumer among two terms @type from_tid: string @param from_tid: one term id @type to_tid: string @param to_tid: another term id @rtype: string @return: the term identifier of the common subsumer
def footer(self):
    """Return the three footer axis instances (left, center, right)."""
    parts = (self.footer_left(on=False),
             self.footer_center(on=False),
             self.footer_right(on=False))
    return parts
Returns the three axis instances (left, center, right) where the footer will be printed
def perform(self):
    """Performs all stored actions."""
    if not self._driver.w3c:
        # Legacy path: invoke each queued callable in order.
        for queued_action in self._actions:
            queued_action()
        return
    self.w3c_actions.perform()
Performs all stored actions.
def start(self, service): try: map(self.start_class, service.depends) if service.is_running(): return if service in self.failed: log.warning("%s previously failed to start", service) return service.start() except Exception: log.exception("Unable to start service %s", service) self.failed.add(service)
Start the service, catching and logging exceptions
def get_leads(self, offset=None, limit=None, lead_list_id=None,
              first_name=None, last_name=None, email=None, company=None,
              phone_number=None, twitter=None):
    """Gives back all the leads saved in your account.

    :param offset: Number of leads to skip.
    :param limit: Maximum number of leads to return.
    :param lead_list_id: Id of a lead list to query leads on.
    :param first_name: First name to filter on.
    :param last_name: Last name to filter on.
    :param email: Email to filter on.
    :param company: Company to filter on.
    :param phone_number: Phone number to filter on.
    :param twitter: Twitter account to filter on.

    :return: All leads found as a dict.
    """
    # Snapshot the arguments before creating any other locals: at this
    # point ``locals()`` holds exactly the call parameters.
    args = locals()
    filters = {name: value for name, value in args.items()
               if value is not None and name != 'self'}
    # NOTE(review): this updates the shared ``self.base_params`` dict in
    # place, so filters accumulate across calls — presumably unintended;
    # confirm before changing.
    params = self.base_params
    params.update(filters)
    endpoint = self.base_endpoint.format('leads')
    return self._query_hunter(endpoint, params)
Gives back all the leads saved in your account. :param offset: Number of leads to skip. :param limit: Maximum number of leads to return. :param lead_list_id: Id of a lead list to query leads on. :param first_name: First name to filter on. :param last_name: Last name to filter on. :param email: Email to filter on. :param company: Company to filter on. :param phone_number: Phone number to filter on. :param twitter: Twitter account to filter on. :return: All leads found as a dict.
def files(self): ios_names = [info.name for info in self._ios_to_add.keys()] return set(self.files_to_add + ios_names)
files that will be add to tar file later should be tuple, list or generator that returns strings
def has_level_label(label_flags, vmin):
    """Returns true if ``label_flags`` indicate at least one label for this level.

    If the minimum view limit is not an exact integer, the first tick
    label won't be shown, so that case counts as having no label.
    """
    no_labels = (label_flags.size == 0
                 or (label_flags.size == 1
                     and label_flags[0] == 0
                     and vmin % 1 > 0.0))
    return not no_labels
Returns true if the ``label_flags`` indicate there is at least one label for this level. if the minimum view limit is not an exact integer, then the first tick label won't be shown, so we must adjust for that.
def add_arguments(cls, parser, sys_arg_list=None):
    """Register the configfile-mode arguments on ``parser``.

    :param parser: argparse parser to extend.
    :param sys_arg_list: unused here; kept for interface compatibility.
    :return: list of argument names handled by this mode.
    """
    parser.add_argument(
        '-f', '--file',
        dest='file',
        required=True,
        help="config file for routing groups "
             "(only in configfile mode)")
    return ["file"]
Arguments for the configfile mode.
def reference_fasta(self):
    """Absolute path to the fasta file with EricScript reference data.

    Returns None when no database location is configured or no matching
    file exists.
    """
    if not self._db_location:
        return None
    pattern = os.path.join(self._db_location, "*", self._REF_FASTA)
    matches = glob.glob(pattern)
    # First match wins; the layout is expected to hold one reference.
    return matches[0] if matches else None
Absolute path to the fasta file with EricScript reference data.
def getWord(self, pattern, returnDiff=0):
    """Returns the word associated with pattern.

    Example: net.getWord([0, 0, 0, 1]) => "tom"

    Falls back to the closest stored pattern by squared Euclidean
    distance when there is no exact match; with ``returnDiff`` true the
    distance is returned alongside the word.
    """
    best_dist = 10000
    best_word = None
    for word in self.patterns:
        stored = self.patterns[word]
        # Skip scalar entries; only sequence patterns can be compared.
        if type(stored) in [int, float, int]:
            continue
        if len(stored) != len(pattern):
            continue
        dist = reduce(operator.add,
                      [(a - b) ** 2 for (a, b) in zip(stored, pattern)])
        if dist == 0.0:
            # Exact match; no closer candidate can exist.
            return (word, dist) if returnDiff else word
        if dist < best_dist:
            best_dist = dist
            best_word = word
    return (best_word, best_dist) if returnDiff else best_word
Returns the word associated with pattern. Example: net.getWord([0, 0, 0, 1]) => "tom" This method now returns the closest pattern based on distance.
def is_svn_page(html):
    """Returns true if the page appears to be the index page of an SVN repository."""
    has_revision_title = re.search(r'<title>[^<]*Revision \d+:', html)
    powered_by_svn = re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
    # Both markers must be present for a positive identification.
    return has_revision_title and powered_by_svn
Returns true if the page appears to be the index page of an svn repository
def _file_path(self, dirname, filename):
    """Build 'dirname/filename', creating the directory and file if missing.

    @dirname  - Directory path.
    @filename - File name.

    Returns a full path of 'dirname/filename'; creation errors (other
    than keyboard interrupts) are deliberately ignored so callers always
    get a path back.
    """
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            # Best effort: a racing process may have created it already.
            pass
    full_path = os.path.join(dirname, filename)
    if not os.path.exists(full_path):
        try:
            # Touch the file so it exists for later readers.
            open(full_path, "w").close()
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            pass
    return full_path
Builds an absolute path and creates the directory and file if they don't already exist. @dirname - Directory path. @filename - File name. Returns a full path of 'dirname/filename'.
def to_latex(self):
    """Returns a LaTeX representation of an attribute-value matrix."""
    # NOTE(review): this template is never fed through .format(), so the
    # literal "{}" remains in the output — confirm whether that is intended.
    latex = r"[{} "
    for attribute, value in self:
        # Internal bookkeeping attributes are not part of the printed AVM.
        if attribute in ['speaker_model', 'is_in_commonground']:
            continue
        value_l = value.to_latex()
        if value_l == "":
            continue
        latex += "{attribute:<15} & {value:<20} \\\\ \n".format(attribute=attribute, value=value_l)
    latex += "]\n"
    return latex
Returns a LaTeX representation of an attribute-value matrix
def as_a_dict(self):
    """Displays the index as a dictionary.

    Includes the design document id, index name, index type, and index
    definition; a ``partitioned`` flag is added only when set.

    :returns: Dictionary representation of the index
    """
    result = {
        'ddoc': self._ddoc_id,
        'name': self._name,
        'type': self._type,
        'def': self._def,
    }
    if self._partitioned:
        result['partitioned'] = True
    return result
Displays the index as a dictionary. This includes the design document id, index name, index type, and index definition. :returns: Dictionary representation of the index as a dictionary
def render_authenticateLinks(self, ctx, data):
    """Add signup links for anonymous users; emit nothing otherwise.

    The I{signup-link} pattern is loaded from the tag and each copy gets
    its I{prompt} and I{url} slots filled from the public signup info;
    the list of copies is added as children of the tag.
    """
    if self.username is not None:
        # Authenticated: remove the tag from the output entirely.
        return ''
    from xmantissa.signup import _getPublicSignupInfo
    signupPattern = inevow.IQ(ctx.tag).patternGenerator('signup-link')
    links = [
        signupPattern.fillSlots('prompt', prompt).fillSlots('url', url)
        for (prompt, url) in _getPublicSignupInfo(self.store)
    ]
    return ctx.tag[links]
For unauthenticated users, add login and signup links to the given tag. For authenticated users, remove the given tag from the output. When necessary, the I{signup-link} pattern will be loaded from the tag. Each copy of it will have I{prompt} and I{url} slots filled. The list of copies will be added as children of the tag.
def add_automation_link(testcase):
    """Appends a link to the automation script to the test description."""
    script_url = testcase.get("automation_script")
    link = '<a href="{}">Test Source</a>'.format(script_url) if script_url else ""
    existing = testcase.get("description") or ""
    testcase["description"] = "{}<br/>{}".format(existing, link)
Appends link to automation script to the test description.
def vcf2abook():
    """Command line tool to convert from vCard to Abook syntax."""
    from argparse import ArgumentParser, FileType
    from sys import stdin

    parser = ArgumentParser(description='Converter from vCard to Abook syntax.')
    parser.add_argument('infile', nargs='?', type=FileType('r'), default=stdin,
                        help='Input vCard file (default: stdin)')
    parser.add_argument('outfile', nargs='?',
                        default=expanduser('~/.abook/addressbook'),
                        help='Output Abook file (default: ~/.abook/addressbook)')
    cli_args = parser.parse_args()
    # Delegate the actual conversion to the Abook helper.
    Abook.abook_file(cli_args.infile, cli_args.outfile)
Command line tool to convert from vCard to Abook
def get_gicon(self, icon_id: str) -> "Gio.Icon":
    """Lookup Gio.Icon from udiskie-internal id."""
    names = self._icon_names[icon_id]
    return Gio.ThemedIcon.new_from_names(names)
Lookup Gio.Icon from udiskie-internal id.
def video(self):
    """Access the Video Twilio Domain.

    :returns: Video Twilio Domain
    :rtype: twilio.rest.video.Video
    """
    # Import lazily and cache the domain instance on first access.
    if self._video is None:
        from twilio.rest.video import Video
        self._video = Video(self)
    return self._video
Access the Video Twilio Domain :returns: Video Twilio Domain :rtype: twilio.rest.video.Video
def call_actions_parallel(self, service_name, actions, **kwargs):
    """Send multiple single-action jobs to one service in parallel and block.

    Delegates to ``call_actions_parallel_future`` and waits on its result.

    :param service_name: The name of the service to call
    :param actions: Iterable of ``ActionRequest`` objects or equivalent dicts
    :return: Action responses, one per action, in the order provided
    :raise: Whatever the underlying future raises (job/action errors, etc.)
    """
    future = self.call_actions_parallel_future(service_name, actions, **kwargs)
    return future.result()
Build and send multiple job requests to one service, each job with one action, to be executed in parallel, and return once all responses have been received. Returns a list of action responses, one for each action in the same order as provided, or raises an exception if any action response is an error (unless `raise_action_errors` is passed as `False`) or if any job response is an error (unless `raise_job_errors` is passed as `False`). This method performs expansions if the Client is configured with an expansion converter. :param service_name: The name of the service to call :type service_name: union[str, unicode] :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects :type actions: iterable[union[ActionRequest, dict]] :param expansions: A dictionary representing the expansions to perform :type expansions: dict :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults to `True`) :type raise_action_errors: bool :param timeout: If provided, this will override the default transport timeout values to; requests will expire after this number of seconds plus some buffer defined by the transport, and the client will not block waiting for a response for longer than this amount of time. 
:type timeout: int :param switches: A list of switch value integers :type switches: list :param correlation_id: The request correlation ID :type correlation_id: union[str, unicode] :param continue_on_error: Whether to continue executing further actions once one action has returned errors :type continue_on_error: bool :param context: A dictionary of extra values to include in the context header :type context: dict :param control_extra: A dictionary of extra values to include in the control header :type control_extra: dict :return: A generator of action responses :rtype: Generator[ActionResponse] :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError
def load_json_fixture(fixture_path: str) -> Dict[str, Any]:
    """Load a fixture file and return its parsed JSON contents."""
    with open(fixture_path) as handle:
        return json.load(handle)
Load a JSON fixture file and return its parsed contents.
def update_elements(self, line, column, charcount, docdelta=0):
    """Update child elements' start/end charindex values after an edit.

    :arg line: the line number of the *start* of the operation.
    :arg column: the column number of the start of the operation.
    :arg charcount: the total character length change from the operation.
    :arg docdelta: the character length of changes made to types/execs
      whose docstrings external to their definitions were changed.
    """
    # Absolute character position that the edit reaches up to.
    target = self.charindex(line, column) + charcount
    if line < self.contains_index:
        # Edit above the boundary: shift affected type elements and any
        # member spans that start after the target position.
        for t in self.types:
            if self._update_char_check(self.types[t], target, docdelta):
                self._element_charfix(self.types[t], charcount)
        for m in self.members:
            if self.members[m].start > target:
                self.members[m].start += charcount
                self.members[m].end += charcount
        # Invalidate the cached boundary so it is recomputed on next use.
        self._contains_index = None
    else:
        # Edit below the boundary only affects executables.
        for iexec in self.executables:
            if self._update_char_check(self.executables[iexec], target, docdelta):
                self._element_charfix(self.executables[iexec], charcount)
Updates all the element instances that are children of this module to have new start and end charindex values based on an operation that was performed on the module source code. :arg line: the line number of the *start* of the operation. :arg column: the column number of the start of the operation. :arg charcount: the total character length change from the operation. :arg docdelta: the character length of changes made to types/execs that are children of the module whose docstrings external to their definitions were changed.
def set_env(self):
    """Put info about coverage into the env so that subprocesses can activate coverage."""
    if self.cov_source is None:
        source_value = ''
    else:
        source_value = UNIQUE_SEP.join(self.cov_source)
    os.environ['COV_CORE_SOURCE'] = source_value
    os.environ['COV_CORE_DATA_FILE'] = self.cov_data_file
    os.environ['COV_CORE_CONFIG'] = self.cov_config
Put info about coverage into the env so that subprocesses can activate coverage.
def get_workspace_activities(brain, limit=1):
    """Return the workspace activities, newest first.

    Regarding the time value: ``datetime`` holds the machine-readable
    international format, while ``title`` holds the absolute date and
    time of the post for display.
    """
    microblog = queryUtility(IMicroblogTool)
    items = microblog.context_values(brain.getObject(), limit=limit)
    activities = []
    for item in items:
        activities.append({
            'subject': item.creator,
            'verb': 'published',
            'object': item.text,
            'time': {
                'datetime': item.date.strftime('%Y-%m-%d'),
                'title': item.date.strftime('%d %B %Y, %H:%M'),
            },
        })
    return activities
Return the workspace activities sorted by reverse chronological order Regarding the time value: - the datetime value contains the time in international format (machine readable) - the title value contains the absolute date and time of the post
def _check_jp2h_child_boxes(self, boxes, parent_box_name):
    """Certain boxes can only reside in the JP2 header.

    Recursively verifies that header-only child boxes appear only inside
    'jp2h' (or 'jpch') parents.
    """
    header_only = set(['bpcc', 'cdef', 'cmap', 'ihdr', 'pclr'])
    found = set(box.box_id for box in boxes).intersection(header_only)
    if found and parent_box_name not in ['jp2h', 'jpch']:
        msg = "A {0} box can only be nested in a JP2 header box."
        raise IOError(msg.format(list(found)[0]))
    # Recurse into superboxes (anything that itself contains boxes).
    for box in boxes:
        if hasattr(box, 'box'):
            self._check_jp2h_child_boxes(box.box, box.box_id)
Certain boxes can only reside in the JP2 header.
def scale(self, by):
    """Scale the points in the Pattern.

    Parameters
    ----------
    by : float or np.ndarray, shape=(3,)
        The factor to scale by.  A scalar scales all directions
        isotropically; an np.ndarray scales each direction independently.
    """
    factor = np.asarray([by])
    self.points *= factor
    # Ports track the points, so rescale them too.
    self._adjust_ports()
Scale the points in the Pattern. Parameters ---------- by : float or np.ndarray, shape=(3,) The factor to scale by. If a scalar, scale all directions isotropically. If np.ndarray, scale each direction independently.
def install_PMK(self):
    """Compute and install the PMK.

    Derives a 32-byte key from the passphrase using PBKDF2-HMAC-SHA1
    with the SSID as salt and 4096 iterations.
    """
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA1(),
        length=32,
        salt=self.ssid.encode(),
        iterations=4096,
        backend=default_backend(),
    )
    self.pmk = kdf.derive(self.passphrase.encode())
Compute and install the PMK
def _read(self, delta=0, wrap=False, strict=False):
    """Read the value at a relative point in the wikicode.

    The value is read from ``self._head`` plus *delta* (which can be
    negative).  If *wrap* is False, attempts to read from before the
    start of the string return ``self.START``; with *wrap*, only reads
    beyond the wrapped range do.  If *strict* is True, reading past the
    end fails the route via ``_fail_route``; otherwise ``self.END`` is
    returned.
    """
    index = self._head + delta
    # Negative index: either disallowed (no wrap) or outside wrap range.
    if index < 0 and (not wrap or abs(index) > len(self._text)):
        return self.START
    try:
        return self._text[index]
    except IndexError:
        if strict:
            self._fail_route()
        return self.END
Read the value at a relative point in the wikicode. The value is read from :attr:`self._head <_head>` plus the value of *delta* (which can be negative). If *wrap* is ``False``, we will not allow attempts to read from the end of the string if ``self._head + delta`` is negative. If *strict* is ``True``, the route will be failed (with :meth:`_fail_route`) if we try to read from past the end of the string; otherwise, :attr:`self.END <END>` is returned. If we try to read from before the start of the string, :attr:`self.START <START>` is returned.
def pypy_json_encode(value, pretty=False):
    """Encode ``value`` as JSON through a string builder.

    pypy DOES NOT OPTIMIZE GENERATOR CODE WELL, so the output is built
    via ``UnicodeBuilder``.  On failure it logs and falls back to
    ``pretty_json``; the module-level flag guards against recursive
    failure handling.
    """
    global _dealing_with_problem
    if pretty:
        return pretty_json(value)

    try:
        _buffer = UnicodeBuilder(2048)
        _value2json(value, _buffer)
        output = _buffer.build()
        return output
    except Exception as e:
        # Serialization failed; report and try the pretty printer instead.
        from mo_logs import Log
        if _dealing_with_problem:
            Log.error("Serialization of JSON problems", e)
        else:
            Log.warning("Serialization of JSON problems", e)
        _dealing_with_problem = True
        try:
            return pretty_json(value)
        except Exception as f:
            Log.error("problem serializing object", f)
        finally:
            _dealing_with_problem = False
pypy DOES NOT OPTIMIZE GENERATOR CODE WELL
def keys_create(gandi, fqdn, flag):
    """Create a key for a domain and report its details via output_generic."""
    key_info = gandi.dns.keys_create(fqdn, int(flag))
    output_keys = ['uuid', 'algorithm', 'algorithm_name', 'ds',
                   'fingerprint', 'public_key', 'flags', 'tag', 'status']
    output_generic(gandi, key_info, output_keys, justify=15)
    return key_info
Create key for a domain.
def get_scene_suggestions(self, current):
    """Return elements suggested for reftracks of this type in the scene.

    For assets, the current element itself plus all of its linked assets
    are suggested; for anything else the list is empty.

    :param current: the current scene element
    :returns: list of suggested elements
    """
    suggestions = []
    if isinstance(current, djadapter.models.Asset):
        suggestions.append(current)
        suggestions.extend(current.assets.all())
    return suggestions
Return a list with elements for reftracks for the current scene with this type. For every element returned, the reftrack system will create a :class:`Reftrack` with the type of this interface, if it is not already in the scene. E.g. if you have a type that references whole scenes, you might suggest all linked assets for shots, and all liked assets plus the current element itself for assets. If you have a type like shader, that usually need a parent, you would return an empty list. Cameras might only make sense for shots and not for assets etc. Do not confuse this with :meth:`ReftypeInterface.get_suggestions`. It will gather suggestions for children of a :class:`Reftrack`. The standard implementation only returns an empty list! :param reftrack: the reftrack which needs suggestions :type reftrack: :class:`Reftrack` :returns: list of suggestions, tuples of type and element. :rtype: list :raises: None
def write(self,fitsname=None,wcs=None,archive=True,overwrite=False,quiet=True):
    """Write the WCS keyword values to the specified image.

    For a GEIS image with 'fitsname' given, a multi-extension FITS copy
    is made and updated instead.  If ``archive`` is True, archived WCS
    keyword values are also written out; ``overwrite`` replaces archived
    values.  A WCSObject passed via ``wcs`` has its keywords copied to
    the header instead of this object's (e.g. to sync a WFPC2 _c1h.fits
    data quality file with its _c0h.fits science file).
    """
    self.update()
    image = self.rootname
    _fitsname = fitsname
    # GEIS input with a FITS target: switch to updating the FITS copy.
    if image.find('.fits') < 0 and _fitsname is not None:
        self.geisname = image
        image = self.rootname = _fitsname
    fimg = fileutil.openImage(image, mode='update', fitsname=_fitsname)
    _root,_iextn = fileutil.parseFilename(image)
    _extn = fileutil.getExtn(fimg,_iextn)
    # Choose the keyword source: an explicit WCS object or this instance.
    if wcs:
        _wcsobj = wcs
    else:
        _wcsobj = self
    # Copy every translated WCS keyword except the derived plate scale.
    for key in _wcsobj.wcstrans.keys():
        _dkey = _wcsobj.wcstrans[key]
        if _dkey != 'pscale':
            _extn.header[key] = _wcsobj.__dict__[_dkey]
    fimg.close()
    del fimg
    if archive:
        self.write_archive(fitsname=fitsname,overwrite=overwrite,quiet=quiet)
Write out the values of the WCS keywords to the specified image. If it is a GEIS image and 'fitsname' has been provided, it will automatically make a multi-extension FITS copy of the GEIS and update that file. Otherwise, it throws an Exception if the user attempts to directly update a GEIS image header. If archive=True, also write out archived WCS keyword values to file. If overwrite=True, replace archived WCS values in file with new values. If a WCSObject is passed through the 'wcs' keyword, then the WCS keywords of this object are copied to the header of the image to be updated. A use case for this is updating the WCS of a WFPC2 data quality (_c1h.fits) file in order to be in sync with the science (_c0h.fits) file.
def clean_inconcs(self):
    """Check for inconclusive or unknown results that were not retried.

    :return: True if any inconclusive/unknown item has no retries left.
    """
    for item in self.data:
        needs_retry = item.inconclusive or item.get_verdict() == "unknown"
        if needs_retry and not item.retries_left > 0:
            return True
    return False
Check if there are any inconclusives or unknowns that were not subsequently retried. :return: Boolean
def _create_Z(self, Y):
    """Creates the design matrix holding the lagged variables.

    Parameters
    ----------
    Y : np.array
        The dependent variables Y

    Returns
    ----------
    The design matrix Z
    """
    # One row per lagged regressor plus an intercept row of ones.
    rows = self.ylen * self.lags + 1
    Z = np.ones((rows, Y[0].shape[0]))
    return self.create_design_matrix(Z, self.data, Y.shape[0], self.lags)
Creates design matrix holding the lagged variables Parameters ---------- Y : np.array The dependent variables Y Returns ---------- The design matrix Z
def _callFunc(session, funcName, password, args):
    """Call a custom cjdns admin function over the admin socket.

    Performs the cookie handshake, then sends the (optionally
    authenticated) bencoded request and waits for the matching reply.
    """
    # First exchange: request a cookie to bind this call to.
    txid = _randomString()
    sock = session.socket
    sock.send(bytearray('d1:q6:cookie4:txid10:%se' % txid, 'utf-8'))
    msg = _getMessage(session, txid)
    cookie = msg['cookie']
    # Second exchange: the real call, hashed with password + cookie.
    txid = _randomString()
    tohash = (password + cookie).encode('utf-8')
    req = {
        'q': funcName,
        'hash': hashlib.sha256(tohash).hexdigest(),
        'cookie': cookie,
        'args': args,
        'txid': txid
    }
    if password:
        # Authenticated calls wrap the real query under 'aq'.
        req['aq'] = req['q']
        req['q'] = 'auth'
    reqBenc = bencode(req).encode('utf-8')
    # The final hash covers the serialized request itself.
    req['hash'] = hashlib.sha256(reqBenc).hexdigest()
    reqBenc = bencode(req)
    sock.send(bytearray(reqBenc, 'utf-8'))
    return _getMessage(session, txid)
Call custom cjdns admin function
def structureOutput(fileUrl, fileName, searchFiles, format=True, space=40):
    """Formats one line of output for a list of packages.

    :param fileUrl: URL path of the package file.
    :param fileName: Name printed in the right-hand column.
    :param searchFiles: When true, print the archive name found in the
        URL instead of ``fileName``.
    :param format: When true, strip 'pub'/'files' segments and the
        archive name from the URL before printing.
    :param space: Column width reserved for the URL part.
    :return: The formatted output line.
    """
    if format:
        segments = fileUrl[1:].split('/')
        fileUrl = ""
        for segment in segments:
            # Keep only real directory segments: non-empty, not a file
            # name, and not the 'pub'/'files' prefixes.
            if segment != "" and ("." not in segment) and (segment != "pub" and segment != "files"):
                fileUrl += segment + '/'
            elif "." in segment:
                # A dotted segment is the archive file itself.
                archiveName = segment
                if searchFiles:
                    fileName = archiveName
    gap = " " * (space - len(fileUrl))
    return fileUrl + gap + fileName
Formats the output of a list of packages
def create(cls, key, crt):
    """Add a new crt in the hosted cert store.

    :param key: private key contents
    :param crt: certificate contents
    :return: API response of ``cert.hosted.create``
    """
    payload = {'crt': crt, 'key': key}
    return cls.call('cert.hosted.create', payload)
Add a new crt in the hosted cert store.
def add_range(self, start, part_len, total_len):
    """Add range headers indicating that this is a partial response.

    :param start: first byte offset of the returned slice
    :param part_len: number of bytes in the slice
    :param total_len: total size of the full resource
    :return: self, for chaining
    """
    last_byte = start + part_len - 1
    self.statusline = '206 Partial Content'
    self.replace_header('Content-Range',
                        'bytes {0}-{1}/{2}'.format(start, last_byte, total_len))
    self.replace_header('Content-Length', str(part_len))
    self.replace_header('Accept-Ranges', 'bytes')
    return self
Add range headers indicating that this a partial response
def get_doc_id(document_pb, expected_prefix):
    """Parse a document ID from a document protobuf.

    Args:
        document_pb (google.cloud.proto.firestore.v1beta1.\
            document_pb2.Document): A protobuf for a document that was
            created in a ``CreateDocument`` RPC.
        expected_prefix (str): The expected collection prefix for the
            fully-qualified document name.

    Returns:
        str: The document ID from the protobuf.

    Raises:
        ValueError: If the name does not begin with the prefix.
    """
    prefix, document_id = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1)
    if prefix == expected_prefix:
        return document_id
    raise ValueError(
        "Unexpected document name",
        document_pb.name,
        "Expected to begin with",
        expected_prefix,
    )
Parse a document ID from a document protobuf. Args: document_pb (google.cloud.proto.firestore.v1beta1.\ document_pb2.Document): A protobuf for a document that was created in a ``CreateDocument`` RPC. expected_prefix (str): The expected collection prefix for the fully-qualified document name. Returns: str: The document ID from the protobuf. Raises: ValueError: If the name does not begin with the prefix.
def _parse_example(serialized_example):
    """Return inputs and targets Tensors from a serialized tf.Example."""
    feature_spec = {
        "inputs": tf.VarLenFeature(tf.int64),
        "targets": tf.VarLenFeature(tf.int64),
    }
    parsed = tf.parse_single_example(serialized_example, feature_spec)
    # VarLen features come back sparse; densify before returning.
    return (tf.sparse_tensor_to_dense(parsed["inputs"]),
            tf.sparse_tensor_to_dense(parsed["targets"]))
Return inputs and targets Tensors from a serialized tf.Example.
def __autoconnect_signals(self):
    """Autoconnect glade-file signals to this controller's methods.

    Called during view registration: collects every callable attribute of
    the controller and hands the mapping to the glade XML / GtkBuilder
    signal autoconnectors.
    """
    handlers = {}
    for name in dir(self):
        attribute = getattr(self, name)
        # ``isinstance(x, collections.Callable)`` breaks on Python 3.10+,
        # where the ABC moved to collections.abc and the alias was
        # removed; the ``callable`` builtin is the portable check.
        if not callable(attribute):
            continue
        assert name not in handlers
        handlers[name] = attribute

    for xml in self.view.glade_xmlWidgets:
        xml.signal_autoconnect(handlers)

    if self.view._builder is not None:
        self.view._builder_connect_signals(handlers)
This is called during view registration, to autoconnect signals in glade file with methods within the controller
def _assign_zones(self):
    """Assign all RainCloudyFaucetZone managed by this faucet."""
    for zone_id in range(1, 5):
        zone = RainCloudyFaucetZone(
            parent=self._parent,
            controller=self._controller,
            faucet=self,
            zone_id=zone_id)
        # Avoid duplicate registration on repeated refreshes.
        if zone not in self.zones:
            self.zones.append(zone)
Assign all RainCloudyFaucetZone managed by faucet.
def remember(empowered, powerupClass, interface):
    """Power up ``empowered`` with a powerup stored by class name.

    Stores the fully-qualified name of ``powerupClass`` in a
    ``_StoredByName`` item and installs that item as a powerup for
    ``interface``; the class is instantiated with the empowered's store
    when the powerup is adapted.

    :param empowered: the ``axiom.item.Empowered`` (Store or Item) to power up
    :param powerupClass: the class that will be powered up to
    :param interface: the interface of the powerup
    :returns: ``None``
    """
    stored = _StoredByName(
        store=empowered.store,
        className=fullyQualifiedName(powerupClass),
    )
    empowered.powerUp(stored, interface)
Adds a powerup to ``empowered`` that will instantiate ``powerupClass`` with the empowered's store when adapted to the given interface. :param empowered: The Empowered (Store or Item) to be powered up. :type empowered: ``axiom.item.Empowered`` :param powerupClass: The class that will be powered up to. :type powerupClass: class :param interface: The interface of the powerup. :type interface: ``zope.interface.Interface`` :returns: ``None``
def git_ls_remote(repo_dir, remote='origin', refs=None):
    """Run ``git ls-remote`` and map refs to commit hashes.

    ``remote`` may be a remote name in a local repo (e.g. ``origin``) or
    the URL of a remote repository. ``refs`` may be a single ref or a list
    of refs to restrict the listing.

    :return: dict of ``{ref: commit_hash}``
    """
    command = ['git', 'ls-remote', pipes.quote(remote)]
    if refs:
        command.extend(refs if isinstance(refs, list) else [refs])

    result = {}
    for raw_line in execute_git_command(command, repo_dir=repo_dir).splitlines():
        line = raw_line.strip()
        # Skip blanks and the informational "From <url>" header line.
        if not line or line.lower().startswith('from '):
            continue
        commit_hash, ref = line.split(None, 1)
        result[ref] = commit_hash
    return result
Run git ls-remote. 'remote' can be a remote ref in a local repo, e.g. origin, or url of a remote repository. Return format: .. code-block:: python {<ref1>: <commit_hash1>, <ref2>: <commit_hash2>, ..., <refN>: <commit_hashN>, }
def _startRelay(self, client):
    """Start relaying data between the process and the protocol.

    Called when the protocol is connected: flushes any output the process
    buffered before the protocol attached, wires the protocol in as the
    process's receiver, and arranges for the relay transport to be torn
    down when the process ends.
    """
    process = client.transport.connector.process
    # Replay output buffered while no protocol was attached yet.
    for _, data in process.data:
        client.dataReceived(data)
    # From now on the process delivers directly to the protocol.
    process.protocol = client
    # Decorator form: registers stopRelay as both callback and errback
    # on the process-ended deferred.
    @process._endedDeferred.addBoth
    def stopRelay(reason):
        relay = client.transport
        relay.loseConnection(reason)
        connector = relay.connector
        connector.connectionLost(reason)
    return client
Start relaying data between the process and the protocol. This method is called when the protocol is connected.
def logpdf(self, mu):
    """Log PDF (up to an additive constant) for the Normal prior.

    :param mu: latent variable the prior is formed over; transformed
        first when ``self.transform`` is set
    :return: ``-log(sigma0) - 0.5 * ((mu - mu0) / sigma0) ** 2``
    """
    if self.transform is not None:
        mu = self.transform(mu)
    sigma = float(self.sigma0)
    residual = mu - self.mu0
    return -np.log(sigma) - 0.5 * residual ** 2 / sigma ** 2
Log PDF for Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu))
def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)):
    """Quantiles for weighted data.

    :param W: (N,) ndarray of normalised weights (>= 0, summing to one)
    :param x: (N,) or (N, d) ndarray of data
    :param alphas: probabilities in (0, 1) at which to take quantiles
    :return: a (k,) or (d, k) ndarray of alpha-quantiles
    """
    if x.ndim == 1:
        return _wquantiles(W, x, alphas=alphas)
    if x.ndim == 2:
        return np.array([
            _wquantiles(W, x[:, col], alphas=alphas)
            for col in range(x.shape[1])
        ])
Quantiles for weighted data. Parameters ---------- W: (N,) ndarray normalised weights (weights are >=0 and sum to one) x: (N,) or (N,d) ndarray data alphas: list-like of size k (default: (0.25, 0.50, 0.75)) probabilities (between 0. and 1.) Returns ------- a (k,) or (d, k) ndarray containing the alpha-quantiles
def write_response(
    self, status_code: Union[int, constants.HttpStatusCode],
    *, headers: Optional[_HeaderType]=None
) -> "writers.HttpResponseWriter":
    """Write a response to the client and remember its writer."""
    # Normalise plain ints to the enum before delegating.
    status = constants.HttpStatusCode(status_code)
    writer = self.__delegate.write_response(status, headers=headers)
    self._writer = writer
    return writer
Write a response to the client.
def hline(level, **kwargs):
    """Draw a horizontal line at the given level.

    :param level: float (or array of floats) level(s) to draw at
    :param preserve_domain: when True the line does not affect the
        domain of the 'y' scale (default False)
    """
    kwargs.setdefault('colors', ['dodgerblue'])
    kwargs.setdefault('stroke_width', 1)
    scales = kwargs.pop('scales', {})
    fig = kwargs.get('figure', current_figure())
    scales['x'] = fig.scale_x

    level = array(level)
    x = [0, 1]
    # Scalar level -> a single line; array -> one line per level.
    if level.ndim == 0:
        y = [level, level]
    else:
        y = column_stack([level, level])

    return plot(
        x, y,
        scales=scales,
        preserve_domain={
            'x': True,
            'y': kwargs.get('preserve_domain', False),
        },
        axes=False,
        update_context=False,
        **kwargs
    )
Draws a horizontal line at the given level. Parameters ---------- level: float The level at which to draw the horizontal line. preserve_domain: boolean (default: False) If true, the line does not affect the domain of the 'y' scale.
def pool_set_autostart(name, state='on', **kwargs):
    """Set the autostart flag on a libvirt storage pool.

    :param name: libvirt storage pool name
    :param state: 'on' to auto start the pool, anything else to disable
        starting the pool when the host boots
    :param kwargs: libvirt connection parameters (connection, username,
        password) overriding defaults
    :return: True on success

    CLI Example:

    .. code-block:: bash

        salt "*" virt.pool_set_autostart <pool> <on | off>
    """
    conn = __get_conn(**kwargs)
    try:
        pool = conn.storagePoolLookupByName(name)
        flag = 1 if state == 'on' else 0
        # setAutostart returns 0 on success, hence the negation.
        return not bool(pool.setAutostart(flag))
    finally:
        conn.close()
Set the autostart flag on a libvirt storage pool so that the storage pool will start with the host system on reboot. :param name: libvirt storage pool name :param state: 'on' to auto start the pool, anything else to mark the pool not to be started when the host boots :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt "*" virt.pool_set_autostart <pool> <on | off>
def _release_command_buffer(self, command_buffer):
    """Called by a command buffer when it closes.

    Unregisters the buffer from the poll object and returns its
    connection to the pool. A no-op when the buffer is already closed.
    """
    if not command_buffer.closed:
        self._cb_poll.unregister(command_buffer.host_id)
        self.connection_pool.release(command_buffer.connection)
        command_buffer.connection = None
This is called by the command buffer when it closes.
def detect_language(lang, kernel_source):
    """Attempt to detect the language from the kernel source if not given.

    :param lang: explicit language name, or None to auto-detect
    :param kernel_source: kernel source string, or a code generator
        function (only allowed when ``lang`` is specified)
    :return: "CUDA", "OpenCL", "C", or the explicitly given ``lang``
    :raises TypeError: if ``lang`` is None and a generator is passed
    """
    if lang is not None:
        return lang
    if callable(kernel_source):
        raise TypeError("Please specify language when using a code generator function")
    kernel_string = get_kernel_string(kernel_source)
    if "__global__" in kernel_string:
        return "CUDA"
    if "__kernel" in kernel_string:
        return "OpenCL"
    return "C"
attempt to detect language from the kernel_string if not specified
def new():
    """Scaffold a new historical technology from the bundled cookiecutter."""
    template = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        'historical-cookiecutter/',
    )
    cookiecutter(template)
Creates a new historical technology.
def generate_private_key(key_type):
    """Generate a random private key using sensible parameters.

    :param str key_type: The type of key to generate. One of: ``rsa``.
    :raises ValueError: for any unsupported ``key_type``.
    """
    if key_type != u'rsa':
        raise ValueError(key_type)
    # 2048-bit RSA with the standard public exponent F4.
    return rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend())
Generate a random private key using sensible parameters. :param str key_type: The type of key to generate. One of: ``rsa``.
def get_request_headers(self):
    """Return request headers that will be sent upstream.

    REMOTE_USER is added for the current active user when the view's
    ``add_remote_user`` flag is set and AuthenticationMiddleware has
    attached a user to the request.
    """
    headers = self.get_proxy_request_headers(self.request)
    if (self.add_remote_user
            and hasattr(self.request, 'user')
            and self.request.user.is_active):
        headers['REMOTE_USER'] = self.request.user.get_username()
        self.log.info("REMOTE_USER set")
    return headers
Return request headers that will be sent to upstream. The header REMOTE_USER is set to the current user if AuthenticationMiddleware is enabled and the view's add_remote_user property is True. .. versionadded:: 0.9.8
def display_items(self) -> typing.List[Display]:
    """Return the list of display items wrapped as Display facade objects.

    .. versionadded:: 1.0

    Scriptable: Yes
    """
    return [
        Display(item)
        for item in self.__document_model.display_items
    ]
Return the list of display items. :return: The list of :py:class:`nion.swift.Facade.Display` objects. .. versionadded:: 1.0 Scriptable: Yes
def rename_annotations(self, sentence):
    """Rename and restructure clause annotation info for each token."""
    # Original marker -> renamed annotation, checked in priority order.
    marker_map = (
        ('KINDEL_PIIR', CLAUSE_BOUNDARY),
        ('KIILU_ALGUS', EMBEDDED_CLAUSE_START),
        ('KIILU_LOPP', EMBEDDED_CLAUSE_END),
    )
    renamed = []
    for token in sentence:
        data = {CLAUSE_IDX: token[CLAUSE_IDX]}
        if CLAUSE_ANNOT in token:
            for marker, annotation in marker_map:
                if marker in token[CLAUSE_ANNOT]:
                    data[CLAUSE_ANNOTATION] = annotation
                    break
        renamed.append(data)
    return renamed
Function that renames and restructures clause information.
async def dbpoolStats(self, *args, **kwargs):
    """Statistics on the Database client pool.

    This method is only for debugging the ec2-manager.

    This method is ``experimental``.
    """
    funcinfo = self.funcinfo["dbpoolStats"]
    return await self._makeApiCall(funcinfo, *args, **kwargs)
Statistics on the Database client pool This method is only for debugging the ec2-manager This method is ``experimental``
def untokenize(words):
    """Join tokens back into text, restoring punctuation spacing.

    Undoes a tokenizing operation so that, ideally,
    ``untokenize(tokenize(text))`` equals ``text`` except for line breaks.
    """
    text = ' '.join(words)
    # Quote/ellipsis fixups applied before the regex passes.
    for old, new in (("`` ", '"'), (" ''", '"'), ('. . .', '...')):
        text = text.replace(old, new)
    text = text.replace(" ( ", " (").replace(" ) ", ") ")
    # Pull punctuation runs back onto the preceding word.
    text = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", text)
    text = re.sub(r' ([.,:;?!%]+)$', r"\1", text)
    # Contractions and common splits.
    for old, new in ((" '", "'"), (" n't", "n't"), ("can not", "cannot")):
        text = text.replace(old, new)
    text = text.replace(" ` ", " '")
    return text.strip()
Untokenizing a text undoes the tokenizing operation, restoring punctuation and spaces to the places that people expect them to be. Ideally, `untokenize(tokenize(text))` should be identical to `text`, except for line breaks.
def hash_trees(self):
    """Count ladderized tree topologies by hashing their newick strings."""
    seen = {}  # digest -> index of first tree with that topology
    for idx, tree in enumerate(self.treelist):
        newick = tree.write(tree_format=9)
        digest = md5(newick.encode("utf-8")).hexdigest()
        if digest in seen:
            self.treedict[seen[digest]] += 1
        else:
            seen[digest] = idx
            self.treedict[idx] = 1
hash ladderized tree topologies
def start(self):
    """Launch the background thread that serves httpd forever."""
    worker = threading.Thread(target=self._background_runner)
    self._process = worker
    worker.start()
Create a background thread for httpd and serve forever.
def setup(self, target=None, strict=False, minify=False, line_numbers=False, keep_lines=False, no_tco=False):
    """Initializes parsing parameters.

    ``target`` is normalised (dots stripped, pseudo-target aliases
    resolved) and validated against the supported target list before
    the compiler flags are stored on the instance.
    """
    target = "" if target is None else str(target).replace(".", "")
    # Resolve aliases such as version pseudo-names to concrete targets.
    target = pseudo_targets.get(target, target)
    if target not in targets:
        raise CoconutException(
            "unsupported target Python version " + ascii(target),
            extra="supported targets are " + ', '.join(ascii(t) for t in specific_targets) + ", or leave blank for universal",
        )
    logger.log_vars("Compiler args:", locals())

    self.target = target
    self.strict = strict
    self.minify = minify
    self.line_numbers = line_numbers
    self.keep_lines = keep_lines
    self.no_tco = no_tco
Initializes parsing parameters.
def fields_to_dict(self):
    """Serialize all non-empty field containers into a dict."""
    result = {}
    for container in FieldsContainer.class_container.values():
        fields = getattr(self, container)
        if not fields:
            continue
        result[container] = [f.to_dict() for f in fields]
    return result
Transform the object to a dict and return the dict.
def set_until(self, frame, lineno=None):
    """Stop when the line number in ``frame`` exceeds ``lineno``
    or when returning from ``frame``.

    When ``lineno`` is omitted, the next line of the frame is used.
    """
    target = frame.f_lineno + 1 if lineno is None else lineno
    self._set_stopinfo(frame, target)
Stop when the current line number in frame is greater than lineno or when returning from frame.
def featured_games(self, region):
    """Get the list of featured games.

    :param string region: The region to execute this request on
    :returns: FeaturedGames
    """
    url, query = SpectatorApiV4Urls.featured_games(region=region)
    endpoint_name = self.featured_games.__name__
    return self._raw_request(endpoint_name, region, url, query)
Get list of featured games. :param string region: The region to execute this request on :returns: FeaturedGames
def snapshot_agent(self):
    """Give a snapshot of everything related to the agent."""
    protocol_sides = [
        protocol.get_agent_side()
        for protocol in self._protocols.values()
    ]
    return (self.agent, protocol_sides)
Gives snapshot of everything related to the agent
def get_phi_variables(self, block_addr):
    """Get a dict of phi variables and their corresponding variables.

    :param int block_addr: Address of the block.
    :return: A dict mapping each phi variable at the block to its
        variables, or an empty dict when the block has none.
    :rtype: dict
    """
    if block_addr not in self._phi_variables_by_block:
        return dict()
    return {
        phi: self._phi_variables[phi]
        for phi in self._phi_variables_by_block[block_addr]
    }
Get a dict of phi variables and their corresponding variables. :param int block_addr: Address of the block. :return: A dict of phi variables of an empty dict if there are no phi variables at the block. :rtype: dict
def is_last_attempt(request):
    """Return ``True`` if the request is on its last retry attempt.

    Also returns ``True`` when ``pyramid_retry`` is inactive for the
    request (no retry metadata in the environ), since no new attempts
    will be issued either way.
    """
    environ = request.environ
    attempt = environ.get('retry.attempt')
    attempts = environ.get('retry.attempts')
    # Missing metadata means pyramid_retry is not managing this request.
    if None in (attempt, attempts):
        return True
    return attempt + 1 == attempts
Return ``True`` if the request is on its last attempt, meaning that ``pyramid_retry`` will not be issuing any new attempts, regardless of what happens when executing this request. This will return ``True`` if ``pyramid_retry`` is inactive for the request.
def get_default_attribute_value(cls, object_class, property_name, attr_type=str):
    """Get the configured default value of a property for a given class.

    Defaults are read from an INI file with one section per class name::

        [NUEntity]
        default_behavior = THIS
        speed = 1000

    :param object_class: class whose ``__name__`` selects the INI section
    :param property_name: option name inside that section
    :param attr_type: expected Python type, used to pick the parser
        (int/long -> ``getint``, bool -> ``getboolean``, else -> ``get``)
    :return: the parsed default value, or ``None`` when no configuration
        file, section, or option exists for the property.
    """
    if not cls._default_attribute_values_configuration_file_path:
        return None

    # Lazily parse the configuration file on first access.
    if not cls._config_parser:
        cls._read_config()

    class_name = object_class.__name__

    if not cls._config_parser.has_section(class_name):
        return None

    if not cls._config_parser.has_option(class_name, property_name):
        return None

    if sys.version_info < (3,):
        integer_types = (int, long,)  # noqa: F821 -- py2 only
    else:
        integer_types = (int,)

    # BUG FIX: the original used isinstance(attr_type, integer_types),
    # which asks whether the *type object* itself is an int instance and
    # is always False, so integer defaults came back as raw strings.
    # Membership against the type tuple is the intended check, and keeps
    # bool (a subclass of int) on its own getboolean branch below.
    if attr_type in integer_types:
        return cls._config_parser.getint(class_name, property_name)
    elif attr_type is bool:
        return cls._config_parser.getboolean(class_name, property_name)
    else:
        return cls._config_parser.get(class_name, property_name)
Gets the default value of a given property for a given object. These properties can be set in a config INI file looking like .. code-block:: ini [NUEntity] default_behavior = THIS speed = 1000 [NUOtherEntity] attribute_name = a value This will be used when creating a :class:`bambou.NURESTObject` when no parameter or data is provided
def all(self):
    """Return a dict of every argument passed with the request.

    Sample Usage
    ++++++++++++

    .. code:: python

        from bast import Controller

        class MyController(Controller):
            def index(self):
                data = self.all()
    """
    # Only the keys are needed; values come from get_argument so that
    # the framework's decoding/defaulting logic applies.
    return {
        key: self.get_argument(key)
        for key in self.request.arguments
    }
Returns all the arguments passed with the request Sample Usage ++++++++++++ .. code:: python from bast import Controller class MyController(Controller): def index(self): data = self.all() Returns a dictionary of all the request arguments
def noise():
    """Return a random unit vector scaled by ``args.noise``."""
    from random import gauss
    # Sample an isotropic direction, then normalise to unit length.
    direction = Vector3(gauss(0, 1), gauss(0, 1), gauss(0, 1))
    direction.normalize()
    return direction * args.noise
a noise vector
def get_schema_path(self, path):
    """Compute a schema's absolute path from its relative path.

    :param path: relative path of the schema.
    :raises invenio_jsonschemas.errors.JSONSchemaNotFound: if no schema
        is registered at the given path.
    :returns: The absolute path.
    """
    if path not in self.schemas:
        raise JSONSchemaNotFound(path)
    root = self.schemas[path]
    return os.path.join(root, path)
Compute the schema's absolute path from a schema relative path. :param path: relative path of the schema. :raises invenio_jsonschemas.errors.JSONSchemaNotFound: If no schema was found in the specified path. :returns: The absolute path.
def report_metric(metric_name: str, value: int, fail_silently: bool=True):
    """Try to report a metric, ignoring all errors unless told otherwise."""
    # metricz is an optional dependency; silently skip when absent.
    if metricz is None:
        return

    configuration = Configuration()

    try:
        lizzy_domain = urlparse(configuration.lizzy_url).netloc
        lizzy_name, _ = lizzy_domain.split('.', 1)
    except Exception:
        lizzy_name = 'UNKNOWN'

    tags = {
        'version': VERSION,
        'lizzy': lizzy_name,
    }

    try:
        metric_writer = metricz.MetricWriter(
            url=configuration.token_url,
            directory=configuration.credentials_dir,
            fail_silently=False)
        metric_writer.write_metric(metric_name, value, tags, timeout=10)
    except Exception:
        # Best-effort by default: only propagate when explicitly requested.
        if not fail_silently:
            raise
Tries to report a metric, ignoring all errors
def NamedPlaceholders(iterable):
    """Return sorted, named MySQL placeholders for the given keys.

    Use this for the VALUES clause of MySQL INSERTs. Column names are
    sorted so iterables with undefined order produce a stable result.

    Examples:
      >>> NamedPlaceholders({"password": "foo", "name": "bar"})
      u'(%(name)s, %(password)s)'

    :param iterable: iterable of strings used as placeholder keys.
    :return: a parenthesised, comma-separated string of placeholders.
    """
    parts = ["%({})s".format(key) for key in sorted(iterable)]
    return "({})".format(", ".join(parts))
Returns named placeholders from all elements of the given iterable. Use this function for VALUES of MySQL INSERTs. To account for Iterables with undefined order (dicts before Python 3.6), this function sorts column names. Examples: >>> NamedPlaceholders({"password": "foo", "name": "bar"}) u'(%(name)s, %(password)s)' Args: iterable: The iterable of strings to be used as placeholder keys. Returns: A string containing a tuple of comma-separated, sorted, named, placeholders.
def update(self, status=values.unset):
    """Update the FaxInstance.

    :param FaxInstance.UpdateStatus status: The new status of the resource

    :returns: Updated FaxInstance
    :rtype: twilio.rest.fax.v1.fax.FaxInstance
    """
    return self._proxy.update(status=status)
Update the FaxInstance :param FaxInstance.UpdateStatus status: The new status of the resource :returns: Updated FaxInstance :rtype: twilio.rest.fax.v1.fax.FaxInstance
def refreshTitles(self):
    """Refresh each tab's text from its widget's window title."""
    for index in range(self.count()):
        title = self.widget(index).windowTitle()
        self.setTabText(index, title)
Refreshes the titles for each view within this tab panel.
def complete(self, msg):
    """Complete a transaction.

    Usually invoked when ProcessIO has shipped the IOCB off to some
    other thread or function. With a controller, completion is delegated
    to it; otherwise the IOCB records the response itself and triggers.
    """
    if _debug:
        IOCB._debug("complete(%d) %r", self.ioID, msg)

    if self.ioController:
        # The owning controller performs the completion bookkeeping.
        self.ioController.complete_io(self, msg)
    else:
        self.ioState = COMPLETED
        self.ioResponse = msg
        self.trigger()
Called to complete a transaction, usually when ProcessIO has shipped the IOCB off to some other thread or function.