code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _read_apps(self):
    """Read the configuration of applications.

    :return: A dictionary with application names as keys and
        configuration objects as values.
    :raises LogRaptorConfigError: if no application could be configured.
    """
    apps = {}
    for cfgfile in glob.iglob(os.path.join(self.confdir, '*.conf')):
        name = os.path.basename(cfgfile)[0:-5]  # strip the '.conf' suffix
        try:
            app = AppLogParser(name, cfgfile, self.args, self.logdir,
                               self.fields, self.name_cache, self.report)
        except (LogRaptorOptionError, LogRaptorConfigError, LogFormatError) as err:
            logger.error('cannot add app %r: %s', name, err)
        else:
            apps[name] = app

    if not apps:
        raise LogRaptorConfigError('no configured application in %r!' % self.confdir)
    return apps
Read the configuration of applications returning a dictionary :return: A dictionary with application names as keys and configuration \ object as values.
def hostname(hn, ft, si):
    """Check hostname, facter and systemid parsers to get fqdn, hostname and domain.

    Prefers the hostname parser, then facter, then systemid.

    Returns:
        Hostname: A named tuple with `fqdn`, `hostname` and `domain` components.

    Raises:
        Exception: If no hostname can be found in any of the three parsers.
    """
    if not hn or not hn.fqdn:
        hn = ft
    if hn and hn.fqdn:
        fqdn = hn.fqdn
        # Derive missing hostname/domain parts from the fqdn.
        hostname = hn.hostname if hn.hostname else fqdn.split(".")[0]
        domain = hn.domain if hn.domain else ".".join(fqdn.split(".")[1:])
        return Hostname(fqdn, hostname, domain)
    else:
        # Fall back to the systemid profile name.
        fqdn = si.get("profile_name") if si else None
        if fqdn:
            hostname = fqdn.split(".")[0]
            domain = ".".join(fqdn.split(".")[1:])
            return Hostname(fqdn, hostname, domain)
    raise Exception("Unable to get hostname.")
Check hostname, facter and systemid to get the fqdn, hostname and domain. Prefer hostname to facter and systemid. Returns: insights.combiners.hostname.Hostname: A named tuple with `fqdn`, `hostname` and `domain` components. Raises: Exception: If no hostname can be found in any of the three parsers.
def calculate_entropy(self, entropy_string):
    """Calculate the entropy of a string from known English letter frequencies.

    Non-alphabetic characters are ignored.

    Args:
        entropy_string: The string to score.

    Returns:
        A float with the total entropy of the string (higher is better).
    """
    total = sum(
        -math.log(self.frequency[symbol.lower()]) / math.log(2)
        for symbol in entropy_string
        if symbol.isalpha()
    )
    logging.debug("Entropy score: {0}".format(total))
    return total
Calculates the entropy of a string based on known frequency of English letters. Args: entropy_string: A str representing the string to calculate. Returns: A negative float with the total entropy of the string (higher is better).
def _maybe_match_name(a, b): a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') if a_has and b_has: if a.name == b.name: return a.name else: return None elif a_has: return a.name elif b_has: return b.name return None
Try to find a name to attach to the result of an operation between a and b. If only one of these has a `name` attribute, return that name. Otherwise return a consensus name if they match of None if they have different names. Parameters ---------- a : object b : object Returns ------- name : str or None See Also -------- pandas.core.common.consensus_name_attr
def _parse_os_release(*os_release_files):
    """Parse os-release and return a parameter dictionary.

    See http://www.freedesktop.org/software/systemd/man/os-release.html
    for specification of the file format.
    """
    ret = {}
    for filename in os_release_files:
        try:
            with salt.utils.files.fopen(filename) as ifile:
                regex = re.compile('^([\\w]+)=(?:\'|")?(.*?)(?:\'|")?$')
                for line in ifile:
                    match = regex.match(line.strip())
                    if match:
                        # Shell special characters ($, quotes, backslash,
                        # backtick) are backslash-escaped in the file; unescape.
                        ret[match.group(1)] = re.sub(
                            r'\\([$"\'\\`])', r'\1', match.group(2)
                        )
            # Stop after the first file that could be read.
            break
        except (IOError, OSError):
            pass
    return ret
Parse os-release and return a parameter dictionary See http://www.freedesktop.org/software/systemd/man/os-release.html for specification of the file format.
def parse_source_file(filename):
    """Parse a source file into an AST node.

    Parameters
    ----------
    filename : str
        File path.

    Returns
    -------
    node : AST node, or None when the file has a syntax error
    content : the file contents as a utf-8 decoded string
    """
    with open(filename, 'rb') as handle:
        raw = handle.read()
    # Normalize Windows line endings before parsing.
    raw = raw.replace(b'\r\n', b'\n')
    text = raw.decode('utf-8')
    try:
        return ast.parse(raw), text
    except SyntaxError:
        return None, text
Parse source file into AST node Parameters ---------- filename : str File path Returns ------- node : AST node content : utf-8 encoded string
def _download_mirbase(args, version="CURRENT"):
    """Download hairpin and miRNA structure files from miRBase.

    :param args: namespace with ``hairpin``, ``mirna`` and ``out`` attributes.
    :param version: miRBase release to fetch (default: the CURRENT release).
    :return: tuple ``(hairpin, mirna)`` of file paths — either the user
        supplied ones or the freshly downloaded files.
    """
    if args.hairpin and args.mirna:
        # Both files supplied by the user; nothing to download.
        return args.hairpin, args.mirna

    logger.info("Working with version %s" % version)
    hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz")
    mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz")
    if not file_exists(hairpin_fn):
        cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$" % (version, hairpin_fn)
        do.run(cmd_h, "download hairpin")
    if not file_exists(mirna_fn):
        cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$" % (version, mirna_fn)
        do.run(cmd_m, "download mirna")
    # BUG FIX: the download branch previously fell through without a return,
    # yielding None; return the downloaded file paths instead.
    return hairpin_fn, mirna_fn
Download files from mirbase
def parse_band_log(self, message):
    """Process incoming logging messages from the service."""
    if "payload" in message and hasattr(message["payload"], "name"):
        record = message["payload"]
        # Strip the 'workflows_exc_' prefix (14 chars) from transported
        # exception attributes, restoring the original attribute names.
        for k in dir(record):
            if k.startswith("workflows_exc_"):
                setattr(record, k[14:], getattr(record, k))
                delattr(record, k)
        # Tag the record with the current service status fields.
        for k, v in self.get_status().items():
            setattr(record, "workflows_" + k, v)
        logging.getLogger(record.name).handle(record)
    else:
        self.log.warning(
            "Received broken record on log band\n"
            + "Message: %s\nRecord: %s",
            str(message),
            str(
                hasattr(message.get("payload"), "__dict__")
                and message["payload"].__dict__
            ),
        )
Process incoming logging messages from the service.
def _read_para_hip_signature_2(self, code, cbit, clen, *, desc, length, version):
    """Read HIP HIP_SIGNATURE_2 parameter [RFC 7401].

    Layout: a 2-byte SIG algorithm field followed by ``clen - 2`` bytes of
    signature, then padding up to ``length``.
    """
    _algo = self._read_unpack(2)
    _sign = self._read_fileng(clen-2)

    hip_signature_2 = dict(
        type=desc,
        critical=cbit,
        length=clen,
        algorithm=_HI_ALGORITHM.get(_algo, 'Unassigned'),
        signature=_sign,
    )

    # Skip any padding after the parameter contents.
    _plen = length - clen
    if _plen:
        self._read_fileng(_plen)

    return hip_signature_2
Read HIP HIP_SIGNATURE_2 parameter. Structure of HIP HIP_SIGNATURE_2 parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | SIG alg | Signature / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ / | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 hip_signature_2.type Parameter Type 1 15 hip_signature_2.critical Critical Bit 2 16 hip_signature_2.length Length of Contents 4 32 hip_signature_2.algorithm SIG Algorithm 6 48 hip_signature_2.signature Signature ? ? - Padding
def save_script_file_for_state_and_source_path(state, state_path_full, as_copy=False):
    """Save the script file for a state to the state's storage directory.

    The script name will be set to the SCRIPT_FILE constant.

    :param state: The state of which the script file should be saved
    :param str state_path_full: The path to the file system storage location of the state
    :param bool as_copy: Temporary storage flag to signal that the given path
        is not the new file_system_path
    """
    from rafcon.core.states.execution_state import ExecutionState
    if isinstance(state, ExecutionState):
        source_script_file = os.path.join(state.script.path, state.script.filename)
        destination_script_file = os.path.join(state_path_full, SCRIPT_FILE)
        try:
            write_file(destination_script_file, state.script_text)
        except Exception:
            logger.exception("Storing of script file failed: {0} -> {1}".format(
                state.get_path(), destination_script_file))
            raise
        # Only rebind the script location when this is a real move, not a copy.
        if not source_script_file == destination_script_file and not as_copy:
            state.script.filename = SCRIPT_FILE
            state.script.path = state_path_full
Saves the script file for a state to the directory of the state. The script name will be set to the SCRIPT_FILE constant. :param state: The state of which the script file should be saved :param str state_path_full: The path to the file system storage location of the state :param bool as_copy: Temporary storage flag to signal that the given path is not the new file_system_path
def _find_start_time(hdr, s_freq):
    """Find the start time, usually in STC, but if that's not correct, use ERD.

    Parameters
    ----------
    hdr : dict
        header with stc (and stamps) and erd
    s_freq : int
        sampling frequency

    Returns
    -------
    datetime
        either from stc or from erd

    Notes
    -----
    Rarely the stc time is off (by hours); in that case the time is
    reconstructed from the ERD creation time minus the stc sample offset
    (jitter in the order of 1-2 s is acceptable).
    """
    start_time = hdr['stc']['creation_time']
    # NOTE(review): if no stamp matches the ERD filename, `offset` is never
    # assigned and the next statement raises NameError — confirm a matching
    # stamp is guaranteed upstream.
    for one_stamp in hdr['stamps']:
        if one_stamp['segment_name'].decode() == hdr['erd']['filename']:
            offset = one_stamp['start_stamp']
            break
    erd_time = (hdr['erd']['creation_time']
                - timedelta(seconds=offset / s_freq)).replace(microsecond=0)
    stc_erd_diff = (start_time - erd_time).total_seconds()
    if stc_erd_diff > START_TIME_TOL:
        lg.warn('Time difference between ERD and STC is {} s so using ERD time'
                ' at {}'.format(stc_erd_diff, erd_time))
        start_time = erd_time
    return start_time
Find the start time, usually in STC, but if that's not correct, use ERD Parameters ---------- hdr : dict header with stc (and stamps) and erd s_freq : int sampling frequency Returns ------- datetime either from stc or from erd Notes ----- Sometimes, but rather rarely, there is a mismatch between the time in the stc and the time in the erd. For some reason, the time in the stc is way off (by hours), which is clearly not correct. We can try to reconstruct the actual time, but looking at the ERD time (of any file apart from the first one) and compute the original time back based on the offset of the number of samples in stc. For some reason, this is not the same for all the ERD, but the jitter is in the order of 1-2s which is acceptable for our purposes (probably, but be careful about the notes).
def _Execute(self, options): whitelist = dict( name=options["name"], description=options.get("description", "<empty>")) return self._agent.client.compute.security_groups.create(**whitelist)
Handles security groups operations.
def set_value(self, pymux, value):
    """Parse *value* as a non-negative integer and store it on *pymux*.

    Raises SetOptionError when the given text does not parse to a
    positive integer.
    """
    try:
        parsed = int(value)
        if parsed < 0:
            raise ValueError
    except ValueError:
        raise SetOptionError('Expecting an integer.')
    else:
        setattr(pymux, self.attribute_name, parsed)
Take a string, and return an integer. Raise SetOptionError when the given text does not parse to a positive integer.
def genlmsg_valid_hdr(nlh, hdrlen):
    """Validate Generic Netlink message headers.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L117

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of user header (integer).

    Returns:
    True if the headers are valid or False if not.
    """
    # The Netlink header itself must be valid and large enough to hold a
    # complete Generic Netlink header.
    if not nlmsg_valid_hdr(nlh, GENL_HDRLEN):
        return False
    # At least `hdrlen` bytes of payload must follow the genl header.
    ghdr = genlmsghdr(nlmsg_data(nlh))
    if genlmsg_len(ghdr) < NLMSG_ALIGN(hdrlen):
        return False
    return True
Validate Generic Netlink message headers. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L117 Verifies the integrity of the Netlink and Generic Netlink headers by enforcing the following requirements: - Valid Netlink message header (`nlmsg_valid_hdr()`) - Presence of a complete Generic Netlink header - At least `hdrlen` bytes of payload included after the generic Netlink header. Positional arguments: nlh -- Netlink message header (nlmsghdr class instance). hdrlen -- length of user header (integer). Returns: True if the headers are valid or False if not.
def changed_path(self):
    """Find any changed path and update all changed modification times.

    Returns a human-readable description of the last change found, or
    None when nothing changed.
    """
    result = None
    # Check known files for newer modification times.
    for path in self.paths_to_modification_times:
        lastmod = self.paths_to_modification_times[path]
        mod = os.path.getmtime(path)
        if mod > lastmod:
            result = "Watch file has been modified: " + repr(path)
            self.paths_to_modification_times[path] = mod
    # Pick up files newly added to watched folders.
    for folder in self.folder_paths:
        for filename in os.listdir(folder):
            subpath = os.path.join(folder, filename)
            if os.path.isfile(subpath) and subpath not in self.paths_to_modification_times:
                result = "New file in watched folder: " + repr(subpath)
                self.add(subpath)
    if self.check_python_modules:
        self.add_all_modules()
    if self.check_javascript:
        self.watch_javascript()
    return result
Find any changed path and update all changed modification times.
def save(self, expires=None):
    """Save a copy of the object into the cache.

    :param expires: cache TTL; defaults to the instance's `expires`.
    """
    if expires is None:
        expires = self.expires
    s = self.serialize()
    key = self._key(self._all_keys())
    _cache.set(key, s, expires)
Save a copy of the object into the cache.
def populateFromRow(self, referenceSetRecord):
    """Populate this reference set from the values in the specified DB row."""
    self._dataUrl = referenceSetRecord.dataurl
    self._description = referenceSetRecord.description
    self._assemblyId = referenceSetRecord.assemblyid
    self._isDerived = bool(referenceSetRecord.isderived)
    self._md5checksum = referenceSetRecord.md5checksum
    # The species column stores JSON; the literal string 'null' marks an
    # absent value.
    species = referenceSetRecord.species
    if species is not None and species != 'null':
        self.setSpeciesFromJson(species)
    self._sourceAccessions = json.loads(
        referenceSetRecord.sourceaccessions)
    self._sourceUri = referenceSetRecord.sourceuri
Populates this reference set from the values in the specified DB row.
def _rev(repo):
    """Return the revision ID of the repo, or None when it cannot be read."""
    try:
        repo_info = dict(six.iteritems(CLIENT.info(repo['repo'])))
    except (pysvn._pysvn.ClientError, TypeError, KeyError, AttributeError) as exc:
        log.error(
            'Error retrieving revision ID for svnfs remote %s '
            '(cachedir: %s): %s',
            repo['url'], repo['repo'], exc
        )
    else:
        return repo_info['revision'].number
    return None
Returns revision ID of repo
def prepare(self):
    """Prepare the incoming request.

    If the request carries a JSON content type, the body is decoded and
    re-assigned as the parsed value.
    """
    super(RequestHandler, self).prepare()
    if self.request.headers.get('content-type', '').startswith(self.JSON):
        self.request.body = escape.json_decode(self.request.body)
Prepare the incoming request, checking to see the request is sending JSON content in the request body. If so, the content is decoded and assigned to the json_arguments attribute.
def find_config(directory_or_file, debug=False):
    """Return the configuration filename.

    If `directory_or_file` is a file, return its real path.  If it is a
    directory, look for any file named in CONFIG_FILES in that directory
    and then in each ancestor, returning the first match (None when
    nothing is found).
    """
    directory_or_file = os.path.realpath(directory_or_file)
    if os.path.isfile(directory_or_file):
        if debug:
            print('using config file {}'.format(directory_or_file),
                  file=sys.stderr)
        return directory_or_file

    directory = directory_or_file
    while directory:
        for name in CONFIG_FILES:
            candidate = os.path.join(directory, name)
            if os.path.exists(candidate):
                if debug:
                    print('using config file {}'.format(candidate),
                          file=sys.stderr)
                return candidate
        parent = os.path.dirname(directory)
        if parent == directory:
            # Reached the filesystem root.
            break
        directory = parent
Return configuration filename. If `directory_or_file` is a file, return the real-path of that file. If it is a directory, find the configuration (any file name in CONFIG_FILES) in that directory or its ancestors.
def run(self, host, port=25, with_ssl=False):
    """Execute a single health check against a remote SMTP host and port.

    This method may only be called once per object.

    :param host: The hostname or IP address of the SMTP server to check.
    :type host: str
    :param port: The port number of the SMTP server to check.
    :type port: int
    :param with_ssl: If ``True``, SSL will be initiated before attempting
        to get the banner message.
    :type with_ssl: bool
    """
    try:
        dns_rec = self._lookup(host, port)
        self._connect(dns_rec)
        if with_ssl:
            self._wrap_ssl()
        banner = self._get_banner()
        self._check_banner(banner)
    except Exception:
        # Any failure is recorded in the results rather than propagated.
        exc_type, exc_value, exc_tb = sys.exc_info()
        self.results['Exception-Type'] = str(exc_type.__name__)
        self.results['Exception-Value'] = str(exc_value)
        self.results['Exception-Traceback'] = repr(traceback.format_exc())
    finally:
        self._close(with_ssl)
Executes a single health check against a remote host and port. This method may only be called once per object. :param host: The hostname or IP address of the SMTP server to check. :type host: str :param port: The port number of the SMTP server to check. :type port: int :param with_ssl: If ``True``, SSL will be initiated before attempting to get the banner message. :type with_ssl: bool
def launchDashboardOverlay(self, pchAppKey):
    """Launch the dashboard overlay application if it is not already running.

    This call is only valid for dashboard overlay applications.
    """
    return self.function_table.launchDashboardOverlay(pchAppKey)
Launches the dashboard overlay application if it is not already running. This call is only valid for dashboard overlay applications.
def _parse_use(self, string):
    """Extract use dependencies from the innertext of a module.

    Returns a dict counting each used module — or ``module.member`` when
    an ``only:`` clause is present.  MPI is skipped.
    """
    result = {}
    for ruse in self.RE_USE.finditer(string):
        # A "use" line may carry a "!" comment; keep only the module name.
        name = ruse.group("name").split("!")[0].strip()
        if name.lower() == "mpi":
            continue
        if ruse.group("only"):
            only = ruse.group("only").split(",")
            for method in only:
                key = "{}.{}".format(name, method.strip())
                self._dict_increment(result, key)
        else:
            self._dict_increment(result, name)
    return result
Extracts use dependencies from the innertext of a module.
def sphericalAngSep(ra0, dec0, ra1, dec1, radians=False):
    """Compute the spherical angular separation between two points on the sky.

    Uses the haversine formula, taken from
    http://www.movable-type.co.uk/scripts/gis-faq-5.1.html

    Inputs and output are in degrees unless ``radians`` is True.
    """
    if radians==False:
        ra0 = np.radians(ra0)
        dec0 = np.radians(dec0)
        ra1 = np.radians(ra1)
        dec1 = np.radians(dec1)

    deltaRa= ra1-ra0
    deltaDec= dec1-dec0

    val = haversine(deltaDec)
    val += np.cos(dec0) * np.cos(dec1) * haversine(deltaRa)
    # Clamp to 1 to guard against rounding error before the arcsin.
    val = min(1, np.sqrt(val))
    val = 2*np.arcsin(val)

    if radians==False:
        val = np.degrees(val)
    return val
Compute the spherical angular separation between two points on the sky. //Taken from http://www.movable-type.co.uk/scripts/gis-faq-5.1.html NB: For small distances you can probably use sqrt( dDec**2 + cos^2(dec)*dRa**2 ) where dDec = dec1 - dec0 and dRa = ra1 - ra0 and dec1 \approx dec \approx dec0
def __add_token_annotation_tier(self, tier):
    """Add a tier to the document graph, in which each event annotates
    exactly one token.
    """
    for i, event in enumerate(tier.iter('event')):
        # Namespaced key, e.g. "exmaralda:pos".
        anno_key = '{0}:{1}'.format(self.ns, tier.attrib['category'])
        anno_val = event.text if event.text else ''
        self.node[event.attrib['start']][anno_key] = anno_val
adds a tier to the document graph, in which each event annotates exactly one token.
def _prepare_value_nd(self, value, vshape):
    """Create an `NDArray` from *value* with this array's context and dtype,
    broadcast to *vshape*.

    :param value: a scalar, NDArray, or array-like object.
    :param vshape: the target shape of the result.
    :raises TypeError: when *value* is not array-like.
    """
    if isinstance(value, numeric_types):
        value_nd = full(shape=vshape, val=value, ctx=self.context, dtype=self.dtype)
    elif isinstance(value, NDArray):
        value_nd = value.as_in_context(self.context)
        if value_nd.dtype != self.dtype:
            value_nd = value_nd.astype(self.dtype)
    else:
        try:
            value_nd = array(value, ctx=self.context, dtype=self.dtype)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise TypeError('NDArray does not support assignment with non-array-like'
                            ' object %s of type %s' % (str(value), str(type(value))))
    if value_nd.shape != vshape:
        value_nd = value_nd.broadcast_to(vshape)
    return value_nd
Given value and vshape, create an `NDArray` from value with the same context and dtype as the current one and broadcast it to vshape.
def _read_stderr(self):
    """Read the stderr file of the kernel and return its text content."""
    f = open(self.stderr_file, 'rb')
    try:
        stderr_text = f.read()
        if not stderr_text:
            return ''
        # Decode using the coding detected from the raw bytes.
        encoding = get_coding(stderr_text)
        stderr_text = to_text_string(stderr_text, encoding)
        return stderr_text
    finally:
        f.close()
Read the stderr file of the kernel.
def internal_get_description(dbg, seq, thread_id, frame_id, expression):
    """Fetch the variable description stub from the debug console.

    Sends the description (or an error message on failure) back through
    the debugger's command writer.
    """
    try:
        frame = dbg.find_frame(thread_id, frame_id)
        description = pydevd_console.get_description(frame, thread_id, frame_id, expression)
        description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
        description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
        cmd = dbg.cmd_factory.make_get_description_message(seq, description_xml)
        dbg.writer.add_command(cmd)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        exc = get_exception_traceback_str()
        cmd = dbg.cmd_factory.make_error_message(seq, "Error in fetching description" + exc)
        dbg.writer.add_command(cmd)
Fetch the variable description stub from the debug console
def get_epoch_namespace_lifetime_grace_period( block_height, namespace_id ):
    """Return the namespace lifetime grace period for the epoch at *block_height*.

    Falls back to the wildcard ('*') namespace entry when *namespace_id*
    has no specific configuration.
    """
    epoch_config = get_epoch_config( block_height )
    namespaces = epoch_config['namespaces']
    # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator
    # works identically on Python 2 and 3.
    if namespace_id in namespaces:
        return namespaces[namespace_id]['NAMESPACE_LIFETIME_GRACE_PERIOD']
    else:
        return namespaces['*']['NAMESPACE_LIFETIME_GRACE_PERIOD']
what's the namespace lifetime grace period for this epoch?
def promise(cls, fn, *args, **kwargs):
    """Build a task around a callable and immediately start it.

    :param fn: callable to execute
    :param args: positional arguments for *fn*
    :param kwargs: keyword arguments for *fn*
    :return: the started SynchronousTask or AsynchronousTask
    """
    new_task = cls.task(target=fn, args=args, kwargs=kwargs)
    new_task.start()
    return new_task
Used to build a task based on a callable function and the arguments. Kick it off and start execution of the task. :param fn: callable :param args: tuple :param kwargs: dict :return: SynchronousTask or AsynchronousTask
def tuple_of(*generators):
    """Generate a tuple by generating values for each of the specified generators.

    This is a class factory: it makes a class which is a closure around
    the specified generators.
    """
    class TupleOfGenerators(ArbitraryInterface):
        @classmethod
        def arbitrary(cls):
            # `tuple` entries only mark the tuple type and are skipped.
            return tuple([
                arbitrary(generator) for generator in generators
                if generator is not tuple
            ])
    # Give the class a descriptive name, e.g. "tuple_of(int, str)".
    TupleOfGenerators.__name__ = ''.join([
        'tuple_of(', ', '.join(generator.__name__ for generator in generators), ')'
    ])
    return TupleOfGenerators
Generates a tuple by generating values for each of the specified generators. This is a class factory, it makes a class which is a closure around the specified generators.
def make_ttv_yaml(corpora, path_to_ttv_file, ttv_ratio=DEFAULT_TTV_RATIO, deterministic=False):
    """Create a test/train/validation split from `corpora` and save it as YAML.

    Each set is subject independent: no subject has data in more than one set.

    # Arguments;
        corpora: a list of the paths to corpora used
        path_to_ttv_file: the path to where the YAML file will be saved
        ttv_ratio: a tuple (e.g. (1,4,4)) of the relative size of each set
        deterministic: whether or not to shuffle the resources around when
            making the split
    """
    dataset = get_dataset(corpora)
    data_sets = make_ttv(dataset, ttv_ratio=ttv_ratio, deterministic=deterministic)

    def get_for_ttv(key):
        # Collect `key` for each of the three sets in a fixed order.
        return (
            data_sets['test'][key],
            data_sets['train'][key],
            data_sets['validation'][key]
        )

    test, train, validation = get_for_ttv('paths')
    number_of_files_for_each_set = list(get_for_ttv('number_of_files'))
    number_of_subjects_for_each_set = [len(x) for x in get_for_ttv('subjects')]

    dict_for_yaml = {
        'split': number_of_files_for_each_set,
        'subject_split': number_of_subjects_for_each_set,
        "test": test,
        "train": train,
        "validation": validation
    }
    with open(path_to_ttv_file, 'w') as f:
        yaml.dump(dict_for_yaml, f, default_flow_style=False)
Create a test, train, validation split from the corpora given and save it as a YAML file. Each set will be subject independent, meaning that no one subject can have data in more than one set # Arguments; corpora: a list of the paths to corpora used (these have to be formatted according to notes.md) path_to_ttv_file: the path to where the YAML file will be saved ttv_ratio: a tuple (e.g. (1,4,4)) of the relative size of each set deterministic: whether or not to shuffle the resources around when making the set.
def verify_logout_request(cls, logout_request, ticket):
    """Verify that the single logout request came from the CAS server.

    Returns True if the logout_request references *ticket*, False otherwise.
    """
    try:
        slos = cls.get_saml_slos(logout_request)
        return slos[0].text == ticket
    except (AttributeError, IndexError):
        # Malformed request or no SessionIndex elements present.
        return False
verifies the single logout request came from the CAS server returns True if the logout_request is valid, False otherwise
def matrix(ctx, scenario_name, subcommand):
    """List matrix of steps used to test instances."""
    args = ctx.obj.get('args')
    command_args = {
        'subcommand': subcommand,
    }
    s = scenarios.Scenarios(
        base.get_configs(args, command_args), scenario_name)
    s.print_matrix()
List matrix of steps used to test instances.
def validate(self, val):
    """Validate *val* according to the requirement.

    The type validator runs only when validation is enabled; a custom
    validator, when configured, always runs.  Returns True on success.
    """
    custom = self.custom_validator
    if self.validation:
        self.type.validate(val)
    if custom is not None:
        custom(val)
    return True
Validate values according to the requirement
def request_with_retries_on_post_search(self, session, url, query, json_input, stream, headers):
    """Submit a request, retrying POST /v1/search requests on 5xx errors.

    We don't currently retry POST requests generally; this is a temporary
    fix that retries POST search (up to 10 attempts) while all other
    requests get a single attempt.
    """
    status_code = 500
    if '/v1/search' in url:
        retry_count = 10
    else:
        retry_count = 1
    while status_code in (500, 502, 503, 504) and retry_count > 0:
        try:
            retry_count -= 1
            res = session.request(self.http_method, url, params=query,
                                  json=json_input, stream=stream,
                                  headers=headers,
                                  timeout=self.client.timeout_policy)
            status_code = res.status_code
        except SwaggerAPIException:
            # Swallow the error while retries remain; re-raise on the last try.
            if retry_count > 0:
                pass
            else:
                raise
    return res
Submit a request and retry POST search requests specifically. We don't currently retry on POST requests, and this is intended as a temporary fix until the swagger is updated and changes applied to prod. In the meantime, this function will add retries specifically for POST search (and any other POST requests will not be retried).
def get_active_keys_to_keycode_list(self):
    """Get a list of active keys.  Uses XQueryKeymap."""
    try:
        _libxdo.xdo_get_active_keys_to_keycode_list
    except AttributeError:
        # Symbol missing from the loaded libxdo build.
        raise NotImplementedError()
    keys = POINTER(charcodemap_t)
    nkeys = ctypes.c_int(0)
    _libxdo.xdo_get_active_keys_to_keycode_list(
        self._xdo, ctypes.byref(keys), ctypes.byref(nkeys))
    # NOTE(review): `keys` is a pointer *type*, not an instance; passing it
    # byref and reading `.value` looks suspect — confirm against the libxdo
    # ctypes bindings.
    return keys.value
Get a list of active keys. Uses XQueryKeymap
def log(self, level, message):
    """Write a log message via the child process.

    The child process must already exist; call :meth:`live_log_child` to
    make sure.  If it has died in a way we don't expect then this will
    raise :const:`signal.SIGPIPE`.
    """
    if self.log_fd is not None:
        # Frame format: two native ints (level, payload length), then payload.
        prefix = struct.pack('ii', level, len(message))
        os.write(self.log_fd, prefix)
        os.write(self.log_fd, message)
Write a log message via the child process. The child process must already exist; call :meth:`live_log_child` to make sure. If it has died in a way we don't expect then this will raise :const:`signal.SIGPIPE`.
def _postrun(self, result):
    """Hook executed after execution.

    :param kser.result.Result result: Execution result
    :return: Execution result
    :rtype: kser.result.Result
    """
    logger.debug(
        "{}.PostRun: {}[{}]".format(
            self.__class__.__name__, self.__class__.path, self.uuid
        ),
        extra=dict(
            kmsg=Message(
                self.uuid, entrypoint=self.__class__.path,
                params=self.params, metadata=self.metadata
            ).dump()
        )
    )
    return self.postrun(result)
To execute after exection :param kser.result.Result result: Execution result :return: Execution result :rtype: kser.result.Result
def use_db(path, mode=WorkDB.Mode.create):
    """Open a DB in file `path` in mode `mode` as a context manager.

    On exiting the context the DB will be automatically closed.

    Args:
        path: The path to the DB file.
        mode: The mode in which to open the DB. See the `Mode` enum for
            details.

    Raises:
        FileNotFoundError: If `mode` is `Mode.open` and `path` does not
            exist.
    """
    database = WorkDB(path, mode)
    try:
        yield database
    finally:
        database.close()
Open a DB in file `path` in mode `mode` as a context manager. On exiting the context the DB will be automatically closed. Args: path: The path to the DB file. mode: The mode in which to open the DB. See the `Mode` enum for details. Raises: FileNotFoundError: If `mode` is `Mode.open` and `path` does not exist.
def reject_entry(request, entry_id):
    """Reject a verified or approved (but not invoiced) entry.

    Admins set its status back to 'unverified' for the user to fix.
    """
    return_url = request.GET.get('next', reverse('dashboard'))
    try:
        entry = Entry.no_join.get(pk=entry_id)
    except:
        # NOTE(review): bare except also hides unrelated errors; narrowing to
        # Entry.DoesNotExist (plus ValueError for malformed pks) would be safer.
        message = 'No such log entry.'
        messages.error(request, message)
        return redirect(return_url)

    if entry.status == Entry.UNVERIFIED or entry.status == Entry.INVOICED:
        msg_text = 'This entry is unverified or is already invoiced.'
        messages.error(request, msg_text)
        return redirect(return_url)

    # Only flip the status after explicit confirmation.
    if request.POST.get('Yes'):
        entry.status = Entry.UNVERIFIED
        entry.save()
        msg_text = 'The entry\'s status was set to unverified.'
        messages.info(request, msg_text)
        return redirect(return_url)
    return render(request, 'timepiece/entry/reject.html', {
        'entry': entry,
        'next': request.GET.get('next'),
    })
Admins can reject an entry that has been verified or approved but not invoiced to set its status to 'unverified' for the user to fix.
def list_changes(self):
    """Return a list of modified records.

    This is only applicable for attached tables.

    Returns:
        A list of `(row_index, record)` tuples of modified records

    Raises:
        :class:`delphin.exceptions.ItsdbError`: when called on a
            detached table
    """
    if not self.is_attached():
        raise ItsdbError('changes are not tracked for detached tables.')
    # Non-None entries in _records mark rows modified since attachment.
    return [(i, self[i]) for i, row in enumerate(self._records)
            if row is not None]
Return a list of modified records. This is only applicable for attached tables. Returns: A list of `(row_index, record)` tuples of modified records Raises: :class:`delphin.exceptions.ItsdbError`: when called on a detached table
def decode(addr):
    """Decode a segwit address.

    Returns a `(witness_version, program_bytes)` tuple, or `(None, None)`
    when the address is invalid.
    """
    hrpgot, data = bech32_decode(addr)
    if hrpgot not in BECH32_VERSION_SET:
        return (None, None)
    # Re-pack the 5-bit groups (past the version value) into 8-bit bytes.
    decoded = convertbits(data[1:], 5, 8, False)
    if decoded is None or len(decoded) < 2 or len(decoded) > 40:
        return (None, None)
    # Witness versions are limited to 0..16.
    if data[0] > 16:
        return (None, None)
    # Version 0 programs must be 20 (P2WPKH) or 32 (P2WSH) bytes long.
    if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
        return (None, None)
    return (data[0], decoded)
Decode a segwit address.
def asQuartusTcl(self, buff: List[str], version: str, component: "Component",
                 packager: "IpPackager", thisIf: 'Interface'):
    """Add interface to Quartus tcl.

    :param buff: line buffer for output
    :param version: Quartus version
    :param component: component object from ipcore generator
    :param packager: instance of IpPackager which is packaging current design
    :param thisIf: interface to add into Quartus TCL
    """
    name = packager.getInterfaceLogicalName(thisIf)
    self.quartus_tcl_add_interface(buff, thisIf, packager)
    # Associate the interface with its clock/reset signals when present.
    clk = thisIf._getAssociatedClk()
    if clk is not None:
        self.quartus_prop(buff, name, "associatedClock",
                          clk._sigInside.name, escapeStr=False)
    rst = thisIf._getAssociatedRst()
    if rst is not None:
        self.quartus_prop(buff, name, "associatedReset",
                          rst._sigInside.name, escapeStr=False)
    # Use the explicit signal map when one is defined, else the plain name.
    m = self.get_quartus_map()
    if m:
        intfMapOrName = m
    else:
        intfMapOrName = thisIf.name
    self._asQuartusTcl(buff, version, name, component, packager,
                       thisIf, intfMapOrName)
Add interface to Quartus tcl :param buff: line buffer for output :param version: Quartus version :param intfName: name of top interface :param component: component object from ipcore generator :param packager: instance of IpPackager which is packagin current design :param allInterfaces: list of all interfaces of top unit :param thisIf: interface to add into Quartus TCL
def hasnew(self,allowempty=False):
    """Does the correction define new corrected annotations?"""
    for e in self.select(New,None,False, False):
        # Skip empty New elements unless explicitly allowed.
        if not allowempty and len(e) == 0:
            continue
        return True
    return False
Does the correction define new corrected annotations?
def MakeOdds(self):
    """Transform from probabilities to odds.

    Values with prob=0 are removed.
    """
    for hypo, prob in self.Items():
        if prob:
            self.Set(hypo, Odds(prob))
        else:
            self.Remove(hypo)
Transforms from probabilities to odds. Values with prob=0 are removed.
def get_self_host(request_data):
    """Return the current host.

    :param request_data: The request as a dict
    :type: dict
    :return: The current host (any numeric port suffix removed)
    :rtype: string
    """
    if 'http_host' in request_data:
        host = request_data['http_host']
    elif 'server_name' in request_data:
        host = request_data['server_name']
    else:
        raise Exception('No hostname defined')

    if ':' in host:
        pieces = host.split(':')
        try:
            # A numeric final segment is treated as a port and stripped.
            float(pieces[-1])
        except ValueError:
            host = ':'.join(pieces)
        else:
            host = pieces[0]
    return host
Returns the current host. :param request_data: The request as a dict :type: dict :return: The current host :rtype: string
def schedCoro(self, coro):
    """Schedule a free-running coroutine to run on this base's event loop.

    Kills the coroutine if Base is fini'd.  It does not pend on coroutine
    completion.

    Precondition:
        This function is *not* threadsafe and must be run on the Base's
        event loop.

    Returns:
        asyncio.Task: An asyncio.Task object.
    """
    import synapse.lib.provenance as s_provenance

    if __debug__:
        assert s_coro.iscoro(coro)
        import synapse.lib.threads as s_threads
        assert s_threads.iden() == self.tid

    task = self.loop.create_task(coro)
    # Propagate provenance onto the child task when scheduled from a task.
    if asyncio.current_task():
        s_provenance.dupstack(task)

    def taskDone(task):
        # Remove the task from the active set and surface any exception.
        self._active_tasks.remove(task)
        try:
            task.result()
        except asyncio.CancelledError:
            pass
        except Exception:
            logger.exception('Task scheduled through Base.schedCoro raised exception')

    self._active_tasks.add(task)
    task.add_done_callback(taskDone)

    return task
Schedules a free-running coroutine to run on this base's event loop. Kills the coroutine if Base is fini'd. It does not pend on coroutine completion. Precondition: This function is *not* threadsafe and must be run on the Base's event loop Returns: asyncio.Task: An asyncio.Task object.
def get_statistics_24h(self, endtime):
    """Return hourly statistics for the 24h window before `endtime`.

    Timestamps are sent to the controller in milliseconds.
    """
    js = json.dumps(
        {'attrs': ["bytes", "num_sta", "time"],
         'start': int(endtime - 86400) * 1000,
         'end': int(endtime - 3600) * 1000})
    # NOTE(review): urllib.urlencode is Python 2 only; Python 3 would need
    # urllib.parse.urlencode.
    params = urllib.urlencode({'json': js})
    return self._read(self.api_url + 'stat/report/hourly.system', params)
Return statistical data last 24h from time
def check_jobs(jobs):
    """Validate the number of jobs.

    Zero is rejected; any negative value is replaced by the machine's CPU
    count (the error message advertises -1, but every negative value is
    accepted).
    """
    if jobs == 0:
        raise click.UsageError("Jobs must be >= 1 or == -1")
    elif jobs < 0:
        import multiprocessing
        jobs = multiprocessing.cpu_count()
    return jobs
Validate number of jobs.
def when_matches(self, path, good_value, bad_values=None, timeout=None,
                 event_timeout=None):
    """Resolve when a path value equals `good_value`.

    Args:
        path (list): The path to wait on
        good_value (object): the value to wait for
        bad_values (list): values to raise an error on
        timeout (float): time in seconds to wait for responses, wait
            forever if None
        event_timeout: maximum time in seconds to wait between each
            response event, wait forever if None
    """
    future = self.when_matches_async(path, good_value, bad_values)
    self.wait_all_futures(
        future, timeout=timeout, event_timeout=event_timeout)
Resolve when an path value equals value Args: path (list): The path to wait to good_value (object): the value to wait for bad_values (list): values to raise an error on timeout (float): time in seconds to wait for responses, wait forever if None event_timeout: maximum time in seconds to wait between each response event, wait forever if None
def fit(self, data, parent_node=None, estimator=None):
    """Compute the CPD for each node from a pandas DataFrame.

    Every data column not already in the model is added as a child of
    `parent_node` before fitting.

    Parameters
    ----------
    data : pandas DataFrame object
        A DataFrame with column names matching the network's variables.
    parent_node : any hashable python object (optional)
        Parent node of the model; falls back to a previously set parent.
    estimator : Estimator class
        Any pgmpy estimator (default: ``MaximumLikelihoodEstimator``).
    """
    if not parent_node:
        if not self.parent_node:
            raise ValueError("parent node must be specified for the model")
        else:
            parent_node = self.parent_node
    if parent_node not in data.columns:
        raise ValueError("parent node: {node} is not present in the given data".format(node=parent_node))
    for child_node in data.columns:
        if child_node != parent_node:
            self.add_edge(parent_node, child_node)
    super(NaiveBayes, self).fit(data, estimator)
Computes the CPD for each node from a given data in the form of a pandas dataframe. If a variable from the data is not present in the model, it adds that node into the model. Parameters ---------- data : pandas DataFrame object A DataFrame object with column names same as the variable names of network parent_node: any hashable python object (optional) Parent node of the model, if not specified it looks for a previously specified parent node. estimator: Estimator class Any pgmpy estimator. If nothing is specified, the default ``MaximumLikelihoodEstimator`` would be used. Examples -------- >>> import numpy as np >>> import pandas as pd >>> from pgmpy.models import NaiveBayes >>> model = NaiveBayes() >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)), ... columns=['A', 'B', 'C', 'D', 'E']) >>> model.fit(values, 'A') >>> model.get_cpds() [<TabularCPD representing P(D:2 | A:2) at 0x4b72870>, <TabularCPD representing P(E:2 | A:2) at 0x4bb2150>, <TabularCPD representing P(A:2) at 0x4bb23d0>, <TabularCPD representing P(B:2 | A:2) at 0x4bb24b0>, <TabularCPD representing P(C:2 | A:2) at 0x4bb2750>] >>> model.edges() [('A', 'D'), ('A', 'E'), ('A', 'B'), ('A', 'C')]
def create_index(self):
    """Create the target index if it does not already exist.

    Override to provide custom settings or mappings; by default the index
    is created with the instance's `settings`.
    """
    es = self._init_connection()
    if not es.indices.exists(index=self.index):
        es.indices.create(index=self.index, body=self.settings)
Override to provide code for creating the target index. By default it will be created without any special settings or mappings.
def _from_dict(cls, _dict):
    """Initialize a TrainingDataSet object from a json dictionary."""
    args = {}
    if 'environment_id' in _dict:
        args['environment_id'] = _dict.get('environment_id')
    if 'collection_id' in _dict:
        args['collection_id'] = _dict.get('collection_id')
    if 'queries' in _dict:
        args['queries'] = [
            TrainingQuery._from_dict(x) for x in (_dict.get('queries'))
        ]
    return cls(**args)
Initialize a TrainingDataSet object from a json dictionary.
def addLabel(self, aminoAcidLabels, excludingModifications=None):
    """Add a new labelstate.

    :param aminoAcidLabels: maps amino acids (one letter code, or 'nTerm' /
        'cTerm') to the expected label modification id(s), e.g.
        ``{'nTerm': 'u:188', 'K': ['u:188', 'u:188']}``
    :param excludingModifications: optional dict mapping a modification id
        to the label modification it prevents, e.g. ``{'u:1': 'u:188'}``
    """
    if excludingModifications is not None:
        # NOTE(review): attribute name looks misspelled
        # ('excludingModifictions'); confirm against the rest of the class.
        self.excludingModifictions = True
    labelEntry = {'aminoAcidLabels': aminoAcidLabels,
                  'excludingModifications': excludingModifications
                  }
    self.labels[self._labelCounter] = labelEntry
    self._labelCounter += 1
Adds a new labelstate. :param aminoAcidsLabels: Describes which amino acids can bear which labels. Possible keys are the amino acids in one letter code and 'nTerm', 'cTerm'. Possible values are the modifications ids from :attr:`maspy.constants.aaModMass` as strings or a list of strings. An example for one expected label at the n-terminus and two expected labels at each Lysine: ``{'nTerm': 'u:188', 'K': ['u:188', 'u:188']}`` :param excludingModifications: optional, A Dectionary that describes which modifications can prevent the addition of labels. Keys and values have to be the modifications ids from :attr:`maspy.constants.aaModMass`. The key specifies the modification that prevents the label modification specified by the value. For example for each modification 'u:1' that is present at an amino acid or terminus of a peptide the number of expected labels at this position is reduced by one: ``{'u:1':'u:188'}``
def detach_all_classes(self):
    """Detach from every currently tracked class."""
    # Snapshot the keys first: detach_class mutates self._observers
    # while we iterate.
    for tracked in list(self._observers):
        self.detach_class(tracked)
Detach from all tracked classes.
def classproperty(func):
    """Decorator turning a method into a class-level attribute.

    Accepts a plain function, classmethod, or staticmethod; plain
    functions are wrapped in ``classmethod`` so the class is bound as the
    first argument. The original docstring is preserved on the descriptor.
    """
    doc = func.__doc__
    if isinstance(func, (classmethod, staticmethod)):
        wrapped = func
    else:
        wrapped = classmethod(func)
    return ClassPropertyDescriptor(wrapped, doc)
Use as a decorator on a method definition to make it a class-level attribute. This decorator can be applied to a method, a classmethod, or a staticmethod. This decorator will bind the first argument to the class object. Usage: >>> class Foo(object): ... @classproperty ... def name(cls): ... return cls.__name__ ... >>> Foo.name 'Foo' Setting or deleting the attribute of this name will overwrite this property. The docstring of the classproperty `x` for a class `C` can be obtained by `C.__dict__['x'].__doc__`.
def sort_cards(cards, ranks=None):
    """Sort a list of cards by the given rank dict.

    :param cards: Cards to sort; each card must expose ``suit`` and
        ``value`` attributes.
    :param dict ranks: Rank dict with optional ``"suits"`` and
        ``"values"`` mappings. Defaults to ``DEFAULT_RANKS`` when falsy.
    :returns: The sorted cards (sorted by suit first, then by value, so
        value order dominates).
    """
    ranks = ranks or DEFAULT_RANKS
    if ranks.get("suits"):
        # Use ``is not None`` (identity check) rather than ``!= None``;
        # suitless cards sort with rank 0.
        cards = sorted(
            cards,
            key=lambda x: ranks["suits"][x.suit] if x.suit is not None else 0
        )
    if ranks.get("values"):
        cards = sorted(
            cards,
            key=lambda x: ranks["values"][x.value]
        )
    return cards
Sorts a given list of cards, either by poker ranks, or big two ranks. :arg cards: The cards to sort. :arg dict ranks: The rank dict to reference for sorting. If ``None``, it will default to ``DEFAULT_RANKS``. :returns: The sorted cards.
def from_sequence(chain, list_of_residues, sequence_type=None):
    """Build a Sequence of Residues, indexed from 1, for the given chain.

    :param chain: chain identifier applied to every residue
    :param list_of_residues: iterable of residue amino-acid codes
    :param sequence_type: optional sequence type forwarded to Sequence/Residue
    """
    seq = Sequence(sequence_type)
    for index, residue_aa in enumerate(list_of_residues, start=1):
        seq.add(Residue(chain, index, residue_aa, sequence_type))
    return seq
Takes in a chain identifier and protein sequence and returns a Sequence object of Residues, indexed from 1.
def rule_variable(field_type, label=None, options=None):
    """Decorator to make a function into a rule variable.

    ``field_type`` must be a subclass of ``BaseType``; the decorated
    function is tagged with metadata used by the rules engine.
    """
    options = options or []

    def wrapper(func):
        is_base_type_subclass = (type(field_type) == type
                                 and issubclass(field_type, BaseType))
        if not is_base_type_subclass:
            raise AssertionError("{0} is not instance of BaseType in"
                                 " rule_variable field_type".format(field_type))
        func.field_type = field_type
        func.is_rule_variable = True
        func.label = label or fn_name_to_pretty_label(func.__name__)
        func.options = options
        return func

    return wrapper
Decorator to make a function into a rule variable
def start_in_keepedalive_processes(obj, nb_process):
    """Start ``nb_process`` worker processes and keep them alive.

    Sends ``obj`` as a job to every worker three times, waiting for each
    worker to acknowledge via its pipe, then tells all workers to stop.

    :param obj: picklable payload sent to each worker as the job
    :param nb_process: number of worker processes to spawn
    """
    processes = []
    readers_pipes = []
    writers_pipes = []
    # One (read, write) pipe pair per direction per worker: the local read
    # end receives acknowledgements, the process write end sends jobs.
    for i in range(nb_process):
        local_read_pipe, local_write_pipe = Pipe(duplex=False)
        process_read_pipe, process_write_pipe = Pipe(duplex=False)
        readers_pipes.append(local_read_pipe)
        writers_pipes.append(process_write_pipe)
        p = Process(target=run_keepedalive_process, args=(local_write_pipe, process_read_pipe, obj))
        p.start()
        processes.append(p)
    # Dispatch the same job three times to every worker.
    for job in range(3):
        print('send new job to processes:')
        for process_number in range(nb_process):
            writers_pipes[process_number].send(obj)
        # Wait until every reader has produced a response (or EOF),
        # collecting used readers so they can be reused for the next job.
        reader_useds = []
        while readers_pipes:
            for r in wait(readers_pipes):
                try:
                    r.recv()
                except EOFError:
                    # Worker closed its end; treat as handled.
                    pass
                finally:
                    reader_useds.append(r)
                    readers_pipes.remove(r)
        readers_pipes = reader_useds
    # Ask every worker to terminate.
    for writer_pipe in writers_pipes:
        writer_pipe.send('stop')
Start nb_process processes and keep them alive. Send jobs to them multiple times, then close them.
def ring_position(self):
    """Current ring position in degrees counterclockwise from north.

    Returns -1 when the finger is lifted from the ring (finger source).

    Raises:
        AttributeError: for events not of type
            :attr:`~libinput.constant.EventType.TABLET_PAD_RING`.
    """
    if self.type != EventType.TABLET_PAD_RING:
        raise AttributeError(_wrong_prop.format(self.type))
    handle = self._handle
    return self._libinput.libinput_event_tablet_pad_get_ring_position(handle)
The current position of the ring, in degrees counterclockwise from the northern-most point of the ring in the tablet's current logical orientation. If the source is :attr:`~libinput.constant.TabletPadRingAxisSource.FINGER`, libinput sends a terminating event with a ring value of -1 when the finger is lifted from the ring. A caller may use this information to e.g. determine if kinetic scrolling should be triggered. For events not of type :attr:`~libinput.constant.EventType.TABLET_PAD_RING`, this property raises :exc:`AttributeError`. Returns: float: The current value of the the axis. -1 if the finger was lifted. Raises: AttributeError
def init_services(service_definitions, service_context, state_db, client_authn_factory=None):
    """Instantiate a set of services from their definitions.

    :param service_definitions: dict of service name -> configuration
        (a ``class`` entry and optional ``kwargs``)
    :param service_context: shared service context passed to every service
    :param state_db: shared state database passed to every service
    :param client_authn_factory: client authentication methods passed to
        every service
    :return: dict mapping each instance's ``service_name`` to the instance
    :raises ValueError: if an instantiated service has no ``service_name``
    """
    services = {}
    for name, conf in service_definitions.items():
        # Note: the stored kwargs dict is updated in place, matching the
        # historical behavior of this function.
        kwargs = conf.get('kwargs', {})
        kwargs.update({'service_context': service_context,
                       'state_db': state_db,
                       'client_authn_factory': client_authn_factory})
        service_class = conf['class']
        if isinstance(service_class, str):
            service_class = util.importer(service_class)
        instance = service_class(**kwargs)
        try:
            services[instance.service_name] = instance
        except AttributeError:
            raise ValueError("Could not load '{}'".format(name))
    return services
Initiates a set of services :param service_definitions: A dictionary cotaining service definitions :param service_context: A reference to the service context, this is the same for all service instances. :param state_db: A reference to the state database. Shared by all the services. :param client_authn_factory: A list of methods the services can use to authenticate the client to a service. :return: A dictionary, with service name as key and the service instance as value.
def _capabilities_dict(envs, tags):
    """Build the desired-capabilities dict for a Remote WebDriver session.

    :param envs: dict of environment variables describing the browser,
        platform, and (optionally) SauceLabs / Jenkins settings
    :param tags: list of string tags to apply to the SauceLabs job
    """
    caps = {
        'browserName': envs['SELENIUM_BROWSER'],
        'acceptInsecureCerts': bool(envs.get('SELENIUM_INSECURE_CERTS', False)),
        'video-upload-on-pass': False,
        'sauce-advisor': False,
        'capture-html': True,
        'record-screenshots': True,
        'max-duration': 600,
        'public': 'public restricted',
        'tags': tags,
    }
    if _use_remote_browser(SAUCE_ENV_VARS):
        # Remote (SauceLabs) run: add platform and credentials.
        caps.update({
            'platform': envs['SELENIUM_PLATFORM'],
            'version': envs['SELENIUM_VERSION'],
            'username': envs['SAUCE_USER_NAME'],
            'accessKey': envs['SAUCE_API_KEY'],
        })
        # Running under Jenkins: label the job with build metadata.
        if 'JOB_NAME' in envs:
            caps.update({
                'build': envs['BUILD_NUMBER'],
                'name': envs['JOB_NAME'],
            })
    return caps
Convert the dictionary of environment variables to a dictionary of desired capabilities to send to the Remote WebDriver. `tags` is a list of string tags to apply to the SauceLabs job.
def pch_emitter(target, source, env):
    """SCons emitter that adds the side-effect object file target.

    Locates the ``.pch`` and ``.obj`` entries among the targets; if no
    object file was listed, one is derived from the PCH name.
    """
    validate_vars(env)
    pch = None
    obj = None
    for t in target:
        ext = SCons.Util.splitext(str(t))[1]
        if ext == '.pch':
            pch = t
        if ext == '.obj':
            obj = t
    if not obj:
        obj = SCons.Util.splitext(str(pch))[0] + '.obj'
    return ([pch, obj], source)
Adds the object file target.
def get_configdir(name):
    """Return the configuration directory for *name*, creating it if needed.

    The directory is chosen as follows:

    1. If the ``name.upper() + 'CONFIGDIR'`` environment variable is set,
       use that (made absolute).
    2. On Linux/macOS use ``$HOME/.config/<name>``; on other platforms use
       ``$HOME/.<name>``.
    3. If no home directory can be determined, return ``None``.

    The chosen directory is created when it does not already exist.
    (Adapted from matplotlib's configuration-directory lookup.)
    """
    configdir = os.environ.get('%sCONFIGDIR' % name.upper())
    if configdir is not None:
        return os.path.abspath(configdir)

    h = _get_home()
    if h is None:
        # Previously this fell through to os.path.exists(None) and raised
        # TypeError; without a home directory there is nowhere to put
        # a config dir, so return None as documented.
        return None

    if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
        p = os.path.join(h, '.config/' + name)
    else:
        p = os.path.join(h, '.' + name)

    if not os.path.exists(p):
        os.makedirs(p)
    return p
Return the string representing the configuration directory. The directory is chosen as follows: 1. If the ``name.upper() + 'CONFIGDIR'`` environment variable is supplied, choose that. 2a. On Linux and macOS, choose ``$HOME/.config/name``. 2b. On other platforms, choose ``$HOME/.name``. 3. The chosen directory is created if it does not already exist. Notes ----- This function is adapted from the matplotlib [1] module References ---------- [1]: http://matplotlib.org/api/
def _load_params(params, logger=logging): if isinstance(params, str): cur_path = os.path.dirname(os.path.realpath(__file__)) param_file_path = os.path.join(cur_path, params) logger.info('Loading params from file %s' % param_file_path) save_dict = nd_load(param_file_path) arg_params = {} aux_params = {} for k, v in save_dict.items(): tp, name = k.split(':', 1) if tp == 'arg': arg_params[name] = v if tp == 'aux': aux_params[name] = v return arg_params, aux_params elif isinstance(params, (tuple, list)) and len(params) == 2: return params[0], params[1] else: raise ValueError('Unsupported params provided. Must be either a path to the param file or' ' a pair of dictionaries representing arg_params and aux_params')
Given a str as a path to the .params file or a pair of params, returns two dictionaries representing arg_params and aux_params.
def clean(self, force=False):
    """Remove generated objects from the dataset so they can be rebuilt.

    Refuses to clean a finalized bundle unless ``force`` is True. On
    success the bundle state transitions CLEANING -> CLEANED, with
    intermediate commits so progress is persisted.

    :param force: clean even if the bundle is finalized
    :return: True if cleaning ran, False if skipped because finalized
    """
    if self.is_finalized and not force:
        self.warn("Can't clean; bundle is finalized")
        return False

    self.log('---- Cleaning ----')
    # Record that cleaning is in progress before touching any artifacts.
    self.state = self.STATES.CLEANING
    self.dstate = self.STATES.BUILDING
    self.commit()

    # Each helper removes one category of generated content; the order
    # appears deliberate (sources before tables/partitions, build state
    # and progress last) — preserve it.
    self.clean_sources()
    self.clean_tables()
    self.clean_partitions()
    self.clean_build()
    self.clean_files()
    self.clean_ingested()
    self.clean_build_state()
    self.clean_progress()

    self.state = self.STATES.CLEANED

    self.commit()

    return True
Clean generated objects from the dataset, but only if there are File contents to regenerate them
def call_mr_transform(data, opt='', path='./', remove_files=True):
    r"""Call the iSAP ``mr_transform`` binary on a 2D array.

    Writes ``data`` to a temporary FITS file, runs ``mr_transform`` on it,
    and reads back the multiresolution result.

    Parameters
    ----------
    data : np.ndarray
        Input data, 2D array
    opt : list or str, optional
        Options passed to mr_transform (a str is split on whitespace)
    path : str, optional
        Path prefix for the temporary files (default './')
    remove_files : bool, optional
        Remove the temporary files on success (default True)

    Returns
    -------
    np.ndarray
        Result of mr_transform, or None if the executable failed

    Raises
    ------
    ImportError
        If astropy is not available
    ValueError
        If the input data is not a 2D numpy array
    """
    if not import_astropy:
        raise ImportError('Astropy package not found.')

    if (not isinstance(data, np.ndarray)) or (data.ndim != 2):
        raise ValueError('Input data must be a 2D numpy array.')

    executable = 'mr_transform'
    is_executable(executable)

    # Timestamp-based name to avoid clobbering concurrent temp files.
    unique_string = datetime.now().strftime('%Y.%m.%d_%H.%M.%S')
    file_name = path + 'mr_temp_' + unique_string
    file_fits = file_name + '.fits'
    file_mr = file_name + '.mr'

    fits.writeto(file_fits, data)

    if isinstance(opt, str):
        opt = opt.split()

    try:
        check_call([executable] + opt + [file_fits, file_mr])
    except Exception:
        # On failure: warn, clean up the input file, and implicitly
        # return None.
        warn('{} failed to run with the options provided.'.format(executable))
        remove(file_fits)
    else:
        result = fits.getdata(file_mr)
        if remove_files:
            remove(file_fits)
            remove(file_mr)
        return result
r"""Call mr_transform This method calls the iSAP module mr_transform Parameters ---------- data : np.ndarray Input data, 2D array opt : list or str, optional Options to be passed to mr_transform path : str, optional Path for output files (default is './') remove_files : bool, optional Option to remove output files (default is 'True') Returns ------- np.ndarray results of mr_transform Raises ------ ValueError If the input data is not a 2D numpy array Examples -------- >>> from modopt.signal.wavelet import * >>> a = np.arange(9).reshape(3, 3).astype(float) >>> call_mr_transform(a) array([[[-1.5 , -1.125 , -0.75 ], [-0.375 , 0. , 0.375 ], [ 0.75 , 1.125 , 1.5 ]], [[-1.5625 , -1.171875 , -0.78125 ], [-0.390625 , 0. , 0.390625 ], [ 0.78125 , 1.171875 , 1.5625 ]], [[-0.5859375 , -0.43945312, -0.29296875], [-0.14648438, 0. , 0.14648438], [ 0.29296875, 0.43945312, 0.5859375 ]], [[ 3.6484375 , 3.73632812, 3.82421875], [ 3.91210938, 4. , 4.08789062], [ 4.17578125, 4.26367188, 4.3515625 ]]], dtype=float32)
def walk_recursive(f, data):
    """Recursively apply ``f`` to all dict keys in a nested structure.

    Dict keys are transformed with ``f``; scalar dict values are kept
    unchanged, while dict/list values are walked recursively. Scalars
    reached directly (or as list elements) are transformed with ``f``,
    matching the original behavior.

    :param f: function to apply
    :param data: dict/list/scalar structure to walk
    :return: the transformed structure
    """
    # Replaces the Python-2-only ``iteritems()`` and the third-party
    # ``funcy.walk_keys`` with portable stdlib equivalents.
    if isinstance(data, list):
        return [walk_recursive(f, d) for d in data]
    if isinstance(data, dict):
        results = {}
        for k, v in data.items():
            if isinstance(v, (dict, list)):
                results[f(k)] = walk_recursive(f, v)
            else:
                # walk_keys semantics: keys transformed, values untouched.
                results[f(k)] = v
        return results
    return f(data)
Recursively apply a function to all dicts in a nested dictionary :param f: Function to apply :param data: Dictionary (possibly nested) to recursively apply function to :return:
def set_loader(self, loader, destructor, state):
    """Override the default disk loader with a custom loader fn.

    Delegates to czmq's ``zcertstore_set_loader``; ``loader`` and
    ``destructor`` are presumably C callback pointers and ``state`` an
    opaque pointer passed through to them — confirm against the czmq API.
    """
    return lib.zcertstore_set_loader(self._as_parameter_, loader, destructor, state)
Override the default disk loader with a custom loader fn.
def _raise_error_if_not_drawing_classifier_input_sframe(
        dataset, feature, target):
    """Sanity-check the SFrame input to ``drawing_classifier.create``.

    Raises a ToolkitError if the feature/target columns are missing or
    have unsupported types, or if the dataset is empty.
    """
    from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
    _raise_error_if_not_sframe(dataset)

    columns = dataset.column_names()
    if feature not in columns:
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if target not in columns:
        raise _ToolkitError("Target column '%s' does not exist" % target)

    # Features are either images or stroke lists.
    feature_type = dataset[feature].dtype
    if feature_type != _tc.Image and feature_type != list:
        raise _ToolkitError("Feature column must contain images"
                            + " or stroke-based drawings encoded as lists of strokes"
                            + " where each stroke is a list of points and"
                            + " each point is stored as a dictionary")

    # Labels are strings or integers.
    target_type = dataset[target].dtype
    if target_type != int and target_type != str:
        raise _ToolkitError("Target column contains " + str(target_type)
                            + " but it must contain strings or integers to represent"
                            + " labels for drawings.")

    if len(dataset) == 0:
        raise _ToolkitError("Input Dataset is empty!")
Performs some sanity checks on the SFrame provided as input to `turicreate.drawing_classifier.create` and raises a ToolkitError if something in the dataset is missing or wrong.
def get_extensions(self, data=False):
    """Yield the Extension attributes of this object, or their names.

    Parameters
    ----------
    data : boolean, optional
        If True, yield the Extension objects themselves; otherwise yield
        their attribute names (default).
    """
    names = [key for key, value in self.__dict__.items()
             if type(value) is Extension]
    for key in names:
        yield getattr(self, key) if data else key
Yields the extensions or their names Parameters ---------- data : boolean, optional If True, returns a generator which yields the extensions. If False, returns a generator which yields the names of the extensions (default) Returns ------- Generator for Extension or string
def linsert(self, key, pivot, value, before=False):
    """Insert ``value`` into the list at ``key`` before/after ``pivot``."""
    where = b'BEFORE' if before else b'AFTER'
    return self.execute(b'LINSERT', key, where, pivot, value)
Inserts value in the list stored at key either before or after the reference value pivot.
def to_placeholder(self, name=None, db_type=None):
    """Return a (possibly typecast) placeholder string.

    :name: when None the unnamed placeholder is returned, otherwise the
        named placeholder formatted with *name*.
    :db_type: when truthy the placeholder is typecast to this type.
    """
    if name is None:
        placeholder = self.unnamed_placeholder
    else:
        placeholder = self.named_placeholder.format(name)
    if not db_type:
        return placeholder
    return self.typecast(placeholder, db_type)
Returns a placeholder for the specified name, by applying the instance's format strings. :name: if None an unamed placeholder is returned, otherwise a named placeholder is returned. :db_type: if not None the placeholder is typecast.
def round(self, decimals=0, *args, **kwargs):
    """
    Round each value in a Series to the given number of decimals.

    Parameters
    ----------
    decimals : int
        Number of decimal places to round to (default: 0). If negative,
        rounds to the left of the decimal point.

    Returns
    -------
    Series
        Rounded values of the Series, with the original index and
        metadata preserved.

    See Also
    --------
    numpy.around : Round values of an np.array.
    DataFrame.round : Round values of a DataFrame.
    """
    # Reject any extra positional/keyword args (numpy compat shim).
    nv.validate_round(args, kwargs)
    result = com.values_from_object(self).round(decimals)
    # Rebuild a Series and propagate metadata (name, etc.).
    result = self._constructor(result, index=self.index).__finalize__(self)
    return result
Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64
def _update_to_s3_uri(property_key, resource_property_dict, s3_uri_value="s3://bucket/value"):
    """Set ``property_key`` in ``resource_property_dict`` to an S3 URI.

    Mutates the dict in place. Values that are already dicts or valid
    S3 URIs are left untouched; anything else (including a missing key,
    which defaults to ".") is overwritten with ``s3_uri_value``.
    """
    current = resource_property_dict.get(property_key, ".")
    already_valid = isinstance(current, dict) or SamTemplateValidator.is_s3_uri(current)
    if already_valid:
        return
    resource_property_dict[property_key] = s3_uri_value
Updates the 'property_key' in the 'resource_property_dict' to the value of 's3_uri_value' Note: The function will mutate the resource_property_dict that is passed in Parameters ---------- property_key str, required Key in the resource_property_dict resource_property_dict dict, required Property dictionary of a Resource in the template to replace s3_uri_value str, optional Value to update the value of the property_key to
def sudo(command, show=True, *args, **kwargs):
    """Run a command as sudo on the remote server.

    :param command: shell command to run
    :param show: when True, echo the command locally before running
    Extra args/kwargs are forwarded to the underlying sudo runner.
    """
    if show:
        print_command(command)
    # Suppress fabric's own "running ..." line; print_command already
    # showed the command when requested.
    with hide("running"):
        return _sudo(command, *args, **kwargs)
Runs a command as sudo on the remote server.
def _get_inline_fragment(ast):
    """Return the inline fragment at this AST node, or None if there is none.

    Raises GraphQLCompilationError if the selection set contains more
    than one inline fragment.
    """
    selection_set = ast.selection_set
    if not selection_set:
        return None

    fragments = [
        node
        for node in selection_set.selections
        if isinstance(node, InlineFragment)
    ]
    if not fragments:
        return None
    if len(fragments) > 1:
        raise GraphQLCompilationError(u'Cannot compile GraphQL with more than one fragment in '
                                      u'a given selection set.')
    return fragments[0]
Return the inline fragment at the current AST node, or None if no fragment exists.
def SetValue(self, row, col, value):
    """Set the value at positional (row, col) in the wrapped pandas DataFrame."""
    self.dataframe.iloc[row, col] = value
Set value in the pandas DataFrame
def total_bytes_billed(self):
    """Return total bytes billed from job statistics, if present.

    See:
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled

    :rtype: int or None
    :returns: total bytes billed, or None if the job is not yet complete.
    """
    raw = self._job_statistics().get("totalBytesBilled")
    # The API returns this as a string; normalize to int when present.
    return None if raw is None else int(raw)
Return total bytes billed from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled :rtype: int or None :returns: total bytes processed by the job, or None if job is not yet complete.
def register_provider(cls, provider):
    """Return a class decorator registering a provider implementation.

    The decorated class is stored in ``cls._providers`` under *provider*
    and gets its ``name`` attribute set to the provider key.
    """
    def _register(impl):
        cls._providers[provider] = impl
        impl.name = provider
        return impl

    return _register
Register method to keep list of providers.
def set_blacklisted_filepaths(self, filepaths, remove_from_stored=True):
    """Replace the blacklisted filepaths with *filepaths*.

    Paths are converted to absolute paths (relative to the current
    working directory). When ``remove_from_stored`` is True, any of the
    blacklisted paths already in ``self.plugin_filepaths`` are removed.
    """
    absolute = util.to_absolute_paths(filepaths)
    self.blacklisted_filepaths = absolute

    if remove_from_stored:
        self.plugin_filepaths = util.remove_from_set(self.plugin_filepaths,
                                                     absolute)
Sets internal blacklisted filepaths to filepaths. If `remove_from_stored` is `True`, any `filepaths` in `self.plugin_filepaths` will be automatically removed. Recommend passing in absolute filepaths but method will attempt to convert to absolute filepaths based on current working directory.
def Reset(self):
    """Preserve the FSM but reset to the 'Start' state and clear results."""
    start_state = self.states['Start']
    self._cur_state = start_state
    self._cur_state_name = 'Start'
    self._result = []
    self._ClearAllRecord()
Preserves FSM but resets starting state and current record.
def addattr(self, attrname, value=None, persistent=True):
    """Add an attribute to self, optionally marking it persistent.

    Persistent attributes are recorded in ``__persistent_attributes__``
    (once each) so they are copied onto views/copies of this array.
    """
    setattr(self, attrname, value)
    if persistent:
        registry = self.__persistent_attributes__
        if attrname not in registry:
            registry.append(attrname)
Adds an attribute to self. If persistent is True, the attribute will be made a persistent attribute. Persistent attributes are copied whenever a view or copy of this array is created. Otherwise, new views or copies of this will not have the attribute.
def _compute_intensity(ccube, bexpcube):
    """Compute the intensity map as counts divided by mean exposure.

    The per-bin exposure is the geometric mean of the exposure cube at
    adjacent bin edges.
    """
    edge_lo = bexpcube.data[0:-1, 0:]
    edge_hi = bexpcube.data[1:, 0:]
    mean_bexp = np.sqrt(edge_lo * edge_hi)
    return HpxMap(ccube.data / mean_bexp, ccube.hpx)
Compute the intensity map
def set_created_date(self, date=None):
    """Set the created date of an IOC.

    :param date: optional xsdDate value (YYYY-MM-DDTHH:MM:SS); when not
        provided the underlying setter defaults to the current date.
    :return: True
    :raises IOCParseError: if the provided date format is not valid.
    """
    if date and not re.match(DATE_REGEX, date):
        raise IOCParseError('Created date is not valid. Must be in the form YYYY-MM-DDTHH:MM:SS')
    ioc_et.set_root_created_date(self.root, date)
    return True
Set the created date of an IOC to the current date. The user may specify the date they want to set as well. :param date: Date value to set the created date to. This should be in the xsdDate form. This defaults to the current date if it is not provided. xsdDate form: YYYY-MM-DDTHH:MM:SS :return: True :raises: IOCParseError if date format is not valid.
def os_walk(top, *args, **kwargs):
    """Wrap ``os.walk`` so every yielded path is unicode.

    On Python 2 + Windows the path is passed through unchanged; elsewhere
    it is normalized to str before walking, and each result tuple is
    decoded.
    """
    if six.PY2 and salt.utils.platform.is_windows():
        top_query = top
    else:
        top_query = salt.utils.stringutils.to_str(top)
    for entry in os.walk(top_query, *args, **kwargs):
        yield salt.utils.data.decode(entry, preserve_tuples=True)
This is a helper that ensures that all paths returned from os.walk are unicode.
def commit(self):
    """Commit the current edit session and clear the stored edit id."""
    request = self.edits().commit(**self.build_params()).execute()
    # print() call form replaces the Python-2-only print statement and
    # works on both Python 2 and 3.
    print('Edit "%s" has been committed' % (request['id']))
    self.edit_id = None
commit current edits.
def simplex_projection(v, b=1):
    r"""Project vector *v* onto the simplex {w : sum(w) = b, w >= 0}.

    Sort-based algorithm from "Efficient projections onto the l1-ball for
    learning in high dimensions", Duchi et al., ICML 2008.

    :Example:

    >>> proj = simplex_projection([.4, .3, -.4, .5])
    >>> proj  # doctest: +NORMALIZE_WHITESPACE
    array([ 0.33333333, 0.23333333, 0. , 0.43333333])
    """
    v = np.asarray(v)
    p = len(v)

    # Negative entries cannot be part of the projection; zero them first.
    v = np.where(v > 0, v, 0)

    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    # Largest index where the sorted value still exceeds the running
    # threshold; determines the support of the projection.
    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])

    w = np.clip(v - theta, 0, None)
    return w
r"""Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> proj # doctest: +NORMALIZE_WHITESPACE array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print(proj.sum()) 1.0 Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu) Python-port: Copyright 2013 by Thomas Wiecki (thomas.wiecki@gmail.com).
def _aux_types(self): aux_types = [] num_aux = self._num_aux for i in range(num_aux): aux_types.append(self._aux_type(i)) return aux_types
The data types of the aux data for the BaseSparseNDArray.
def escape_newlines(s: str) -> str:
    """Escape backslashes, LF, and CR so the result is a single line.

    Counterpart of ``unescape_newlines``. Backslashes are escaped first
    so the escapes introduced for ``\\n`` and ``\\r`` stay unambiguous.
    """
    if not s:
        return s
    for raw, escaped in (("\\", r"\\"), ("\n", r"\n"), ("\r", r"\r")):
        s = s.replace(raw, escaped)
    return s
Escapes CR, LF, and backslashes. Its counterpart is :func:`unescape_newlines`. ``s.encode("string_escape")`` and ``s.encode("unicode_escape")`` are alternatives, but they mess around with quotes, too (specifically, backslash-escaping single quotes).
def uncontract_general(basis, use_copy=True):
    """Remove general contractions from a basis set.

    Shells with multiple contraction coefficient sets (and a single
    angular momentum) are split into one shell per coefficient set.
    The result may contain zero-coefficient functions and duplicate
    shells; it is passed through ``prune_basis`` (without element
    pruning) before returning.

    :param basis: basis set dictionary with an 'elements' mapping
    :param use_copy: when True the input basis is deep-copied and left
        unmodified
    """
    if use_copy:
        basis = copy.deepcopy(basis)

    for k, el in basis['elements'].items():
        if not 'electron_shells' in el:
            continue

        newshells = []

        for sh in el['electron_shells']:
            # Already segmented, or a combined-am shell: keep as-is.
            if len(sh['coefficients']) == 1 or len(sh['angular_momentum']) > 1:
                newshells.append(sh)
            else:
                if len(sh['angular_momentum']) == 1:
                    # Split the general contraction: one shell per
                    # coefficient set, sharing the same exponents.
                    for c in sh['coefficients']:
                        newsh = sh.copy()
                        newsh['coefficients'] = [c]
                        newshells.append(newsh)

        el['electron_shells'] = newshells

    return prune_basis(basis, False)
Removes the general contractions from a basis set The input basis set is not modified. The returned basis may have functions with coefficients of zero and may have duplicate shells. If use_copy is True, the input basis set is not modified.
def Images2Rgbd(rgb, d):
    """Translate a pair of ROS Images into a JderobotTypes Rgbd.

    @param rgb: ROS color Image to translate
    @param d: ROS depth Image to translate
    @return: an Rgbd whose timestamp (seconds) comes from the color image
        header.
    """
    data = Rgbd()
    data.color = imageMsg2Image(rgb)
    data.depth = imageMsg2Image(d)
    stamp = rgb.header.stamp
    data.timeStamp = stamp.secs + (stamp.nsecs * 1e-9)
    return data
Translates from ROS Images to JderobotTypes Rgbd. @param rgb: ROS color Image to translate @param d: ROS depth image to translate @type rgb: ImageROS @type d: ImageROS @return a Rgbd translated from Images
def validate_value(self, value):
    """Validate that *value* is an acceptable type for this field.

    Raises ValidationError if the field is read-only, or if the value is
    set (not None / unset sentinel) and not one of the supported types.
    """
    if self.readonly:
        raise ValidationError(self.record,
                              "Cannot set readonly field '{}'".format(self.name))
    if value in (None, self._unset):
        return
    if self.supported_types and not isinstance(value, tuple(self.supported_types)):
        raise ValidationError(self.record,
                              "Field '{}' expects one of {}, got '{}' instead".format(
                                  self.name,
                                  ', '.join([repr(t.__name__) for t in self.supported_types]),
                                  type(value).__name__)
                              )
Validate value is an acceptable type during set_python operation
def between(self, objs1: List[float], objs2: List[float], n=1):
    """Generate `n` solutions trading off between two objective points.

    Parameters
    ----------
    objs1
        First boundary point of desired objective function values
    objs2
        Second boundary point of desired objective function values
    n
        Number of intermediate solutions to generate
    """
    from desdeo.preference.base import ReferencePoint

    start = np.array(objs1)
    stop = np.array(objs2)
    diff = stop - start
    segments = n + 1

    solutions = []
    for idx in range(1, segments):
        # Evenly spaced reference point between the two boundaries.
        point = start + float(idx) / segments * diff
        solutions.append(
            self._get_ach().result(ReferencePoint(self, point), None)
        )
    return ResultSet(solutions)
Generate `n` solutions which attempt to trade-off `objs1` and `objs2`. Parameters ---------- objs1 First boundary point for desired objective function values objs2 Second boundary point for desired objective function values n Number of solutions to generate
def process_item(self, item):
    """Update the running maximum value for the item's group.

    For a group seen before, just fold in the new value. For a new group,
    temporarily disable source tracking and scan the source for its
    historical maximum before recording it.
    """
    group = item['group']
    value = item['value']

    if group in self._groups:
        self._groups[group] = max(self._groups[group], value)
        return

    # First sighting of this group: scan stored records for its max.
    self._src.tracking = False
    best = value
    for rec in self._src.query(criteria={'group': group},
                               properties=['value']):
        best = max(best, rec['value'])
    self._src.tracking = True
    self._groups[group] = best
Calculate new maximum value for each group, for "new" items only.
def PmfProbLess(pmf1, pmf2):
    """Probability that a value from pmf1 is less than a value from pmf2.

    Args:
        pmf1: Pmf object
        pmf2: Pmf object

    Returns:
        float probability
    """
    # Idiomatic sum over the cross product; 0.0 start keeps the return
    # type float even when no pair satisfies v1 < v2.
    return sum((p1 * p2
                for v1, p1 in pmf1.Items()
                for v2, p2 in pmf2.Items()
                if v1 < v2), 0.0)
Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability
def _GetStatus(self, two_factor=False):
    """Check whether OS Login is installed and active.

    Args:
        two_factor: bool, True to also check two-factor status.

    Returns:
        bool, True if OS Login is active; None if the control binary is
        not installed.
    """
    params = ['status']
    if two_factor:
        params.append('--twofactor')
    retcode = self._RunOsLoginControl(params)
    if retcode is None:
        # Control binary missing entirely: warn once, then remember.
        if self.oslogin_installed:
            self.logger.warning('OS Login not installed.')
        self.oslogin_installed = False
        return None

    self.oslogin_installed = True
    # Without the NSS cache file OS Login cannot be considered active.
    if not os.path.exists(constants.OSLOGIN_NSS_CACHE):
        return False
    return not retcode
Check whether OS Login is installed. Args: two_factor: bool, True if two factor should be enabled. Returns: bool, True if OS Login is installed.
def load_remote_settings(self, remote_bucket, remote_file):
    """Read a flat JSON object from S3 and export it as env variables.

    Each key/value pair in the remote file is added to ``os.environ``.
    All failure modes (fetch, read, parse) print a message and return
    early rather than raising, so missing remote settings are non-fatal.

    :param remote_bucket: S3 bucket containing the settings file
    :param remote_file: key of the settings file within the bucket
    """
    if not self.session:
        boto_session = boto3.Session()
    else:
        boto_session = self.session
    s3 = boto_session.resource('s3')
    try:
        remote_env_object = s3.Object(remote_bucket, remote_file).get()
    except Exception as e:
        print('Could not load remote settings file.', e)
        return
    try:
        content = remote_env_object['Body'].read()
    except Exception as e:
        print('Exception while reading remote settings file.', e)
        return
    try:
        settings_dict = json.loads(content)
    except (ValueError, TypeError):
        print('Failed to parse remote settings!')
        return
    for key, value in settings_dict.items():
        if self.settings.LOG_LEVEL == "DEBUG":
            print('Adding {} -> {} to environment'.format(
                key, value
            ))
        # On Python 2, os.environ keys must be byte strings; failures
        # are reported (in DEBUG) but do not abort the loop.
        try:
            os.environ[str(key)] = value
        except Exception:
            if self.settings.LOG_LEVEL == "DEBUG":
                print("Environment variable keys must be non-unicode!")
Attempt to read a file from s3 containing a flat json object. Adds each key->value pair as environment variables. Helpful for keeping sensitive or stage-specific configuration variables in s3 instead of version control.