def locked_put(self, credentials):
    filters = {self.key_name: self.key_value}
    query = self.session.query(self.model_class).filter_by(**filters)
    entity = query.first()
    if not entity:
        entity = self.model_class(**filters)
    setattr(entity, self.property_name, credentials)
    self.session.add(entity)
Write credentials to the SQLAlchemy datastore.

Args:
    credentials: :class:`oauth2client.Credentials`
def create_commands(self, commands, parser):
    self.apply_defaults(commands)

    def create_single_command(command):
        keys = command['keys']
        del command['keys']
        kwargs = {}
        for item in command:
            kwargs[item] = command[item]
        parser.add_argument(*keys, **kwargs)

    if len(commands) > 1:
        for command in commands:
            create_single_command(command)
    else:
        create_single_command(commands[0])
add commands to parser
def update(kernel=False):
    manager = MANAGER
    cmds = {'yum -y --color=never': {False: '--exclude=kernel* update', True: 'update'}}
    cmd = cmds[manager][kernel]
    run_as_root("%(manager)s %(cmd)s" % locals())
Upgrade all packages, skip obsoletes if ``obsoletes=0`` in ``yum.conf``. Exclude *kernel* upgrades by default.
def update_reportnumbers(self):
    report_037_fields = record_get_field_instances(self.record, '037')
    for field in report_037_fields:
        subs = field_get_subfields(field)
        for val in subs.get("a", []):
            if "arXiv" not in val:
                record_delete_field(self.record, tag="037",
                                    field_position_global=field[4])
                new_subs = [(code, val[0]) for code, val in subs.items()]
                record_add_field(self.record, "088", subfields=new_subs)
                break
Update reportnumbers.
def item_fields(self):
    if self.templates.get("item_fields") and not self._updated(
            "/itemFields", self.templates["item_fields"], "item_fields"):
        return self.templates["item_fields"]["tmplt"]
    query_string = "/itemFields"
    retrieved = self._retrieve_data(query_string)
    return self._cache(retrieved, "item_fields")
Get all available item fields
def _calcSkipRecords(numIngested, windowSize, learningPeriod):
    numShiftedOut = max(0, numIngested - windowSize)
    return min(numIngested, max(0, learningPeriod - numShiftedOut))
Return the value of skipRecords for passing to estimateAnomalyLikelihoods.

If `windowSize` is very large (bigger than the amount of data) then this
could just return `learningPeriod`. But when some values have fallen out of
the historical sliding window of anomaly records, then we have to take those
into account as well so we return the `learningPeriod` minus the number
shifted out.

:param numIngested - (int) number of data points that have been added to the
    sliding window of historical data points.
:param windowSize - (int) size of sliding window of historical data points.
:param learningPeriod - (int) the number of iterations required for the
    algorithm to learn the basic patterns in the dataset and for the anomaly
    score to 'settle down'.
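A small worked example of the formula above (illustrative numbers only; assumes the function is importable):

# 1200 records ingested, a window of 1000, and a learning period of 300:
# numShiftedOut = max(0, 1200 - 1000) = 200
# skipRecords   = min(1200, max(0, 300 - 200)) = 100
print(_calcSkipRecords(numIngested=1200, windowSize=1000, learningPeriod=300))  # -> 100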
def get_course_or_program_context(self, enterprise_customer, course_id=None, program_uuid=None): context_data = {} if course_id: context_data.update({'course_id': course_id, 'course_specific': True}) if not self.preview_mode: try: catalog_api_client = CourseCatalogApiServiceClient(enterprise_customer.site) except ImproperlyConfigured: raise Http404 course_run_details = catalog_api_client.get_course_run(course_id) course_start_date = '' if course_run_details['start']: course_start_date = parse(course_run_details['start']).strftime('%B %d, %Y') context_data.update({ 'course_title': course_run_details['title'], 'course_start_date': course_start_date, }) else: context_data.update({ 'course_title': 'Demo Course', 'course_start_date': datetime.datetime.now().strftime('%B %d, %Y'), }) else: context_data.update({ 'program_uuid': program_uuid, 'program_specific': True, }) return context_data
Return a dict having course or program specific keys for data sharing consent page.
def get_files_to_commit(autooptions):
    workingdir = autooptions['working-directory']
    includes = autooptions['track']['includes']
    excludes = autooptions['track']['excludes']
    includes = r'|'.join([fnmatch.translate(x) for x in includes])
    excludes = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
    matched_files = []
    for root, dirs, files in os.walk(workingdir):
        dirs[:] = [d for d in dirs if not re.match(excludes, d)]
        files = [f for f in files if not re.match(excludes, f)]
        files = [f for f in files if re.match(includes, f)]
        files = [os.path.join(root, f) for f in files]
        matched_files.extend(files)
    return matched_files
Look through the local directory to pick up files to check
def populate_subtasks(self, context, sg, parent_job_id):
    db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE)
    if not db_sg:
        return None
    ports = db_api.sg_gather_associated_ports(context, db_sg)
    if len(ports) == 0:
        return {"ports": 0}
    for port in ports:
        job_body = dict(action="update port %s" % port['id'],
                        tenant_id=db_sg['tenant_id'],
                        resource_id=port['id'],
                        parent_id=parent_job_id)
        job_body = dict(job=job_body)
        job = job_api.create_job(context.elevated(), job_body)
        rpc_consumer = QuarkSGAsyncConsumerClient()
        try:
            rpc_consumer.update_port(context, port['id'], job['id'])
        except om_exc.MessagingTimeout:
            LOG.error("Failed to update port. Rabbit running?")
            return None
Produces a list of ports to be updated async.
def new_symbolic_value(self, nbits, label=None, taint=frozenset()):
    assert nbits in (1, 4, 8, 16, 32, 64, 128, 256)
    avoid_collisions = False
    if label is None:
        label = 'val'
        avoid_collisions = True
    expr = self._constraints.new_bitvec(nbits, name=label, taint=taint,
                                        avoid_collisions=avoid_collisions)
    self._input_symbols.append(expr)
    return expr
Create and return a symbolic value that is `nbits` bits wide. Assign the
value to a register or write it into the address space to introduce it into
the program state.

:param int nbits: The bitwidth of the value returned
:param str label: The label to assign to the value
:param taint: Taint identifier of this value
:type taint: tuple or frozenset
:return: :class:`~manticore.core.smtlib.expression.Expression` representing the value
def not_followed_by(parser):
    @tri
    def not_followed_by_block():
        failed = object()
        result = optional(tri(parser), failed)
        if result != failed:
            fail(["not " + _fun_to_str(parser)])
    choice(not_followed_by_block)
Succeeds if the given parser cannot consume input
def fit(self, X, y=None):
    if is_integer(X):
        dim = X
    else:
        X = as_features(X)
        dim = X.dim
    M = self.smoothness
    inds = np.mgrid[(slice(M + 1),) * dim].reshape(dim, (M + 1) ** dim).T
    self.inds_ = inds[(inds ** 2).sum(axis=1) <= M ** 2]
    return self
Picks the elements of the basis to use for the given data.

Only depends on the dimension of X. If it's more convenient, you can pass a
single integer for X, which is the dimension to use.

Parameters
----------
X : an integer, a :class:`Features` instance, or a list of bag features
    The input data, or just its dimension, since only the dimension is
    needed here.
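A standalone sketch of the index grid the method builds (numpy only; the values of M and dim are illustrative):

import numpy as np

M, dim = 2, 2
inds = np.mgrid[(slice(M + 1),) * dim].reshape(dim, (M + 1) ** dim).T
kept = inds[(inds ** 2).sum(axis=1) <= M ** 2]
# kept holds the multi-indices inside the L2 ball of radius M,
# e.g. [0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [2, 0]
print(kept)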
def is_user_enrolled(cls, user, course_id, course_mode):
    enrollment_client = EnrollmentApiClient()
    try:
        enrollments = enrollment_client.get_course_enrollment(user.username, course_id)
        if enrollments and course_mode == enrollments.get('mode'):
            return True
    except HttpClientError as exc:
        logging.error(
            'Error while checking enrollment status of user %(user)s: %(message)s',
            dict(user=user.username, message=str(exc))
        )
    except KeyError as exc:
        logging.warning(
            'Error while parsing enrollment data of user %(user)s: %(message)s',
            dict(user=user.username, message=str(exc))
        )
    return False
Query the enrollment API and determine if a learner is enrolled in a given course run track.

Args:
    user: The user whose enrollment needs to be checked
    course_mode: The mode with which the enrollment should be checked
    course_id: course id of the course where enrollment should be checked.

Returns:
    Boolean: Whether or not enrollment exists
def figure_protocol(self):
    self.log.debug("creating overlayed protocols plot")
    self.figure()
    plt.plot(self.abf.protoX, self.abf.protoY, color='r')
    self.marginX = 0
    self.decorate(protocol=True)
plot the current sweep protocol.
def parse(filename):
    for event, elt in et.iterparse(filename,
                                   events=('start', 'end', 'comment', 'pi'),
                                   huge_tree=True):
        if event == 'start':
            obj = _elt2obj(elt)
            obj['type'] = ENTER
            yield obj
            if elt.text:
                yield {'type': TEXT, 'text': elt.text}
        elif event == 'end':
            yield {'type': EXIT}
            if elt.tail:
                yield {'type': TEXT, 'text': elt.tail}
            elt.clear()
        elif event == 'comment':
            yield {'type': COMMENT, 'text': elt.text}
        elif event == 'pi':
            yield {'type': PI, 'text': elt.text}
        else:
            assert False, (event, elt)
Parses file content into events stream
def to_type_constructor(value, python_path=None):
    if not value:
        return value
    if callable(value):
        return {'datatype': value}
    value = to_type(value)
    typename = value.get('typename')
    if typename:
        r = aliases.resolve(typename)
        try:
            value['datatype'] = importer.import_symbol(r, python_path=python_path)
            del value['typename']
        except Exception as e:
            value['_exception'] = e
    return value
Tries to convert a value to a type constructor.

If value is a string, then it is used as the "typename" field. If the
"typename" field exists, the symbol for that name is imported and added to
the type constructor as a field "datatype".

Throws:
    ImportError -- if "typename" is set but cannot be imported
    ValueError -- if "typename" is malformed
def docstring(docstr):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        wrapper.__doc__ = docstr
        return wrapper
    return decorator
Decorates a function with the given docstring

Parameters
----------
docstr : string
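A brief usage sketch of the decorator (the decorated function is hypothetical):

@docstring("Add two numbers and return the sum.")
def add(a, b):
    return a + b

print(add.__doc__)  # -> "Add two numbers and return the sum."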
def get_all_firewalls(self):
    data = self.get_data("firewalls")
    firewalls = list()
    for jsoned in data['firewalls']:
        firewall = Firewall(**jsoned)
        firewall.token = self.token
        in_rules = list()
        for rule in jsoned['inbound_rules']:
            in_rules.append(InboundRule(**rule))
        firewall.inbound_rules = in_rules
        out_rules = list()
        for rule in jsoned['outbound_rules']:
            out_rules.append(OutboundRule(**rule))
        firewall.outbound_rules = out_rules
        firewalls.append(firewall)
    return firewalls
This function returns a list of Firewall objects.
def tf_import_experience(self, states, internals, actions, terminal, reward):
    return self.memory.store(
        states=states,
        internals=internals,
        actions=actions,
        terminal=terminal,
        reward=reward
    )
Imports experiences into the TensorFlow memory structure. Can be used to
import off-policy data.

:param states: Dict of state values to import with keys as state names and
    values as values to set.
:param internals: Internal values to set, can be fetched from agent via
    agent.current_internals if no values available.
:param actions: Dict of action values to import with keys as action names
    and values as values to set.
:param terminal: Terminal value(s)
:param reward: Reward value(s)
def _lastRecursiveChild(self):
    "Finds the last element beneath this object to be parsed."
    lastChild = self
    while hasattr(lastChild, 'contents') and lastChild.contents:
        lastChild = lastChild.contents[-1]
    return lastChild
Finds the last element beneath this object to be parsed.
def locked_put(self, credentials):
    entity, _ = self.model_class.objects.get_or_create(
        **{self.key_name: self.key_value})
    setattr(entity, self.property_name, credentials)
    entity.save()
Write a Credentials to the Django datastore.

Args:
    credentials: Credentials, the credentials to store.
def filter_by(zips=_zips, **kwargs):
    return [z for z in zips
            if all([k in z and z[k] == v for k, v in kwargs.items()])]
Use `kwargs` to select for desired attributes from list of zipcode dicts
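A minimal usage sketch with made-up zipcode dicts (the real `_zips` default presumably holds similar records):

sample = [{'zip': '10001', 'state': 'NY'}, {'zip': '94105', 'state': 'CA'}]
print(filter_by(sample, state='CA'))  # -> [{'zip': '94105', 'state': 'CA'}]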
def getaccesskey(self, window_name, object_name): menu_handle = self._get_menu_handle(window_name, object_name) key = menu_handle.AXMenuItemCmdChar modifiers = menu_handle.AXMenuItemCmdModifiers glpyh = menu_handle.AXMenuItemCmdGlyph virtual_key = menu_handle.AXMenuItemCmdVirtualKey modifiers_type = "" if modifiers == 0: modifiers_type = "<command>" elif modifiers == 1: modifiers_type = "<shift><command>" elif modifiers == 2: modifiers_type = "<option><command>" elif modifiers == 3: modifiers_type = "<option><shift><command>" elif modifiers == 4: modifiers_type = "<ctrl><command>" elif modifiers == 6: modifiers_type = "<ctrl><option><command>" if virtual_key == 115 and glpyh == 102: modifiers = "<option>" key = "<cursor_left>" elif virtual_key == 119 and glpyh == 105: modifiers = "<option>" key = "<right>" elif virtual_key == 116 and glpyh == 98: modifiers = "<option>" key = "<up>" elif virtual_key == 121 and glpyh == 107: modifiers = "<option>" key = "<down>" elif virtual_key == 126 and glpyh == 104: key = "<up>" elif virtual_key == 125 and glpyh == 106: key = "<down>" elif virtual_key == 124 and glpyh == 101: key = "<right>" elif virtual_key == 123 and glpyh == 100: key = "<left>" elif virtual_key == 53 and glpyh == 27: key = "<escape>" if not key: raise LdtpServerException("No access key associated") return modifiers_type + key
Get access key of given object

@param window_name: Window name to look for, either full name, LDTP's name
    convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name, LDTP's name
    convention, or a Unix glob. Or menu hierarchy
@type object_name: string

@return: access key in string format on success, else LdtpExecutionError on failure.
@rtype: string
def _match(self, **kwargs): for k in kwargs.keys(): try: val = getattr(self, k) except _a11y.Error: return False if sys.version_info[:2] <= (2, 6): if isinstance(val, basestring): if not fnmatch.fnmatch(unicode(val), kwargs[k]): return False else: if val != kwargs[k]: return False elif sys.version_info[0] == 3: if isinstance(val, str): if not fnmatch.fnmatch(val, str(kwargs[k])): return False else: if val != kwargs[k]: return False else: if isinstance(val, str) or isinstance(val, unicode): if not fnmatch.fnmatch(val, kwargs[k]): return False else: if val != kwargs[k]: return False return True
Method which indicates if the object matches specified criteria. Match accepts criteria as kwargs and looks them up on attributes. Actual matching is performed with fnmatch, so shell-like wildcards work within match strings. Examples: obj._match(AXTitle='Terminal*') obj._match(AXRole='TextField', AXRoleDescription='search text field')
def create_from_snapshot(self, *args, **kwargs):
    data = self.get_data('volumes/',
                         type=POST,
                         params={'name': self.name,
                                 'snapshot_id': self.snapshot_id,
                                 'region': self.region,
                                 'size_gigabytes': self.size_gigabytes,
                                 'description': self.description,
                                 'filesystem_type': self.filesystem_type,
                                 'filesystem_label': self.filesystem_label})
    if data:
        self.id = data['volume']['id']
        self.created_at = data['volume']['created_at']
    return self
Creates a Block Storage volume

Note: Every argument and parameter given to this method will be assigned to
the object.

Args:
    name: string - a name for the volume
    snapshot_id: string - unique identifier for the volume snapshot
    size_gigabytes: int - size of the Block Storage volume in GiB
    filesystem_type: string, optional - name of the filesystem type the
        volume will be formatted with ('ext4' or 'xfs')
    filesystem_label: string, optional - the label to be applied to the
        filesystem, only used in conjunction with filesystem_type

Optional Args:
    description: string - text field to describe a volume
def show_worst_drawdown_periods(returns, top=5):
    drawdown_df = timeseries.gen_drawdown_table(returns, top=top)
    utils.print_table(
        drawdown_df.sort_values('Net drawdown in %', ascending=False),
        name='Worst drawdown periods',
        float_format='{0:.2f}'.format,
    )
Prints information about the worst drawdown periods.

Prints peak dates, valley dates, recovery dates, and net drawdowns.

Parameters
----------
returns : pd.Series
    Daily returns of the strategy, noncumulative.
    See full explanation in tears.create_full_tear_sheet.
top : int, optional
    Amount of top drawdowns periods to plot (default 5).
def verify_valid_gdb_subprocess(self):
    if not self.gdb_process:
        raise NoGdbProcessError("gdb process is not attached")
    elif self.gdb_process.poll() is not None:
        raise NoGdbProcessError(
            "gdb process has already finished with return code: %s"
            % str(self.gdb_process.poll())
        )
Verify there is a process object, and that it is still running. Raise NoGdbProcessError if either of the above are not true.
def _slugify_foreign_key(schema):
    for foreign_key in schema.get('foreignKeys', []):
        foreign_key['reference']['resource'] = _slugify_resource_name(
            foreign_key['reference'].get('resource', ''))
    return schema
Slugify foreign key
def run_multiple(self, workingArea, package_indices):
    if not package_indices:
        return []
    job_desc = self._compose_job_desc(workingArea, package_indices)
    clusterprocids = submit_jobs(job_desc, cwd=workingArea.path)
    clusterids = clusterprocids2clusterids(clusterprocids)
    for clusterid in clusterids:
        change_job_priority([clusterid], 10)
    self.clusterprocids_outstanding.extend(clusterprocids)
    return clusterprocids
Submit multiple jobs

Parameters
----------
workingArea :
    A workingArea
package_indices : list(int)
    A list of package indices

Returns
-------
list(str)
    The list of the run IDs of the jobs
def _syspath_modname_to_modpath(modname, sys_path=None, exclude=None): def _isvalid(modpath, base): subdir = dirname(modpath) while subdir and subdir != base: if not exists(join(subdir, '__init__.py')): return False subdir = dirname(subdir) return True _fname_we = modname.replace('.', os.path.sep) candidate_fnames = [ _fname_we + '.py', ] candidate_fnames += [_fname_we + ext for ext in _platform_pylib_exts()] if sys_path is None: sys_path = sys.path candidate_dpaths = ['.' if p == '' else p for p in sys_path] if exclude: def normalize(p): if sys.platform.startswith('win32'): return realpath(p).lower() else: return realpath(p) real_exclude = {normalize(p) for p in exclude} candidate_dpaths = [p for p in candidate_dpaths if normalize(p) not in real_exclude] for dpath in candidate_dpaths: modpath = join(dpath, _fname_we) if exists(modpath): if isfile(join(modpath, '__init__.py')): if _isvalid(modpath, dpath): return modpath for fname in candidate_fnames: modpath = join(dpath, fname) if isfile(modpath): if _isvalid(modpath, dpath): return modpath
syspath version of modname_to_modpath Args: modname (str): name of module to find sys_path (List[PathLike], default=None): if specified overrides `sys.path` exclude (List[PathLike], default=None): list of directory paths. if specified prevents these directories from being searched. Notes: This is much slower than the pkgutil mechanisms. CommandLine: python -m xdoctest.static_analysis _syspath_modname_to_modpath Example: >>> print(_syspath_modname_to_modpath('xdoctest.static_analysis')) ...static_analysis.py >>> print(_syspath_modname_to_modpath('xdoctest')) ...xdoctest >>> print(_syspath_modname_to_modpath('_ctypes')) ..._ctypes... >>> assert _syspath_modname_to_modpath('xdoctest', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('xdoctest.static_analysis', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('_ctypes', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('this', sys_path=[]) is None Example: >>> # test what happens when the module is not visible in the path >>> modname = 'xdoctest.static_analysis' >>> modpath = _syspath_modname_to_modpath(modname) >>> exclude = [split_modpath(modpath)[0]] >>> found = _syspath_modname_to_modpath(modname, exclude=exclude) >>> # this only works if installed in dev mode, pypi fails >>> assert found is None, 'should not have found {}'.format(found)
def process_dir(self, album, force=False):
    for f in album:
        if isfile(f.dst_path) and not force:
            self.logger.info("%s exists - skipping", f.filename)
            self.stats[f.type + '_skipped'] += 1
        else:
            self.stats[f.type] += 1
            yield (f.type, f.path, f.filename, f.src_path,
                   album.dst_path, self.settings)
Process a list of images in a directory.
def _in_gae_environment():
    if SETTINGS.env_name is not None:
        return SETTINGS.env_name in ('GAE_PRODUCTION', 'GAE_LOCAL')
    try:
        import google.appengine
    except ImportError:
        pass
    else:
        server_software = os.environ.get(_SERVER_SOFTWARE, '')
        if server_software.startswith('Google App Engine/'):
            SETTINGS.env_name = 'GAE_PRODUCTION'
            return True
        elif server_software.startswith('Development/'):
            SETTINGS.env_name = 'GAE_LOCAL'
            return True
    return False
Detects if the code is running in the App Engine environment.

Returns:
    True if running in the GAE environment, False otherwise.
def getExtraIncludes(self):
    if 'extraIncludes' in self.description:
        return [os.path.normpath(x) for x in self.description['extraIncludes']]
    else:
        return []
Some components must export whole directories full of headers into the search path. This is really really bad, and they shouldn't do it, but support is provided as a concession to compatibility.
async def play_at(self, index: int):
    self.queue = self.queue[min(index, len(self.queue) - 1):len(self.queue)]
    await self.play(ignore_shuffle=True)
Play the queue from a specific point. Disregards tracks before the index.
def _accumulateFrequencyCounts(values, freqCounts=None):
    values = numpy.array(values)
    numEntries = values.max() + 1
    if freqCounts is not None:
        numEntries = max(numEntries, freqCounts.size)
    if freqCounts is not None:
        if freqCounts.size != numEntries:
            newCounts = numpy.zeros(numEntries, dtype='int32')
            newCounts[0:freqCounts.size] = freqCounts
        else:
            newCounts = freqCounts
    else:
        newCounts = numpy.zeros(numEntries, dtype='int32')
    for v in values:
        newCounts[v] += 1
    return newCounts
Accumulate a list of values 'values' into the frequency counts 'freqCounts',
and return the updated frequency counts.

For example, if values contained the following: [1,1,3,5,1,3,5], and the
initial freqCounts was None, then the return value would be [0,3,0,2,0,2],
which corresponds to how many of each value we saw in the input, i.e. there
were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's.

If freqCounts is not None, the values will be added to the existing counts
and the length of the frequency counts will be automatically extended as
necessary.

Parameters:
-----------------------------------------------
values:      The values to accumulate into the frequency counts
freqCounts:  Accumulated frequency counts so far, or none
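A quick check of the docstring's example (assumes the function is importable; output shown as numpy would print it):

counts = _accumulateFrequencyCounts([1, 1, 3, 5, 1, 3, 5])
print(counts)   # -> [0 3 0 2 0 2]
counts = _accumulateFrequencyCounts([5, 6], freqCounts=counts)
print(counts)   # -> [0 3 0 2 0 3 1]  (the array grows to fit the new maximum)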
def input(self, input, song):
    try:
        cmd = getattr(self, self.CMD_MAP[input][1])
    except (IndexError, KeyError):
        return self.screen.print_error(
            "Invalid command {!r}!".format(input))
    cmd(song)
Input callback, handles key presses
def generateDataset(aggregationInfo, inputFilename, outputFilename=None): inputFullPath = resource_filename("nupic.datafiles", inputFilename) inputObj = FileRecordStream(inputFullPath) aggregator = Aggregator(aggregationInfo=aggregationInfo, inputFields=inputObj.getFields()) if aggregator.isNullAggregation(): return inputFullPath if outputFilename is None: outputFilename = 'agg_%s' % \ os.path.splitext(os.path.basename(inputFullPath))[0] timePeriods = 'years months weeks days '\ 'hours minutes seconds milliseconds microseconds' for k in timePeriods.split(): if aggregationInfo.get(k, 0) > 0: outputFilename += '_%s_%d' % (k, aggregationInfo[k]) outputFilename += '.csv' outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename) lockFilePath = outputFilename + '.please_wait' if os.path.isfile(outputFilename) or \ os.path.isfile(lockFilePath): while os.path.isfile(lockFilePath): print 'Waiting for %s to be fully written by another process' % \ lockFilePath time.sleep(1) return outputFilename lockFD = open(lockFilePath, 'w') outputObj = FileRecordStream(streamID=outputFilename, write=True, fields=inputObj.getFields()) while True: inRecord = inputObj.getNextRecord() (aggRecord, aggBookmark) = aggregator.next(inRecord, None) if aggRecord is None and inRecord is None: break if aggRecord is not None: outputObj.appendRecord(aggRecord) return outputFilename
Generate a dataset of aggregated values Parameters: ---------------------------------------------------------------------------- aggregationInfo: a dictionary that contains the following entries - fields: a list of pairs. Each pair is a field name and an aggregation function (e.g. sum). The function will be used to aggregate multiple values during the aggregation period. aggregation period: 0 or more of unit=value fields; allowed units are: [years months] | [weeks days hours minutes seconds milliseconds microseconds] NOTE: years and months are mutually-exclusive with the other units. See getEndTime() and _aggregate() for more details. Example1: years=1, months=6, Example2: hours=1, minutes=30, If none of the period fields are specified or if all that are specified have values of 0, then aggregation will be suppressed, and the given inputFile parameter value will be returned. inputFilename: filename of the input dataset within examples/prediction/data outputFilename: name for the output file. If not given, a name will be generated based on the input filename and the aggregation params retval: Name of the generated output file. This will be the same as the input file name if no aggregation needed to be performed If the input file contained a time field, sequence id field or reset field that were not specified in aggregationInfo fields, those fields will be added automatically with the following rules: 1. The order will be R, S, T, rest of the fields 2. The aggregation function for all will be to pick the first: lambda x: x[0] Returns: the path of the aggregated data file if aggregation was performed (in the same directory as the given input file); if aggregation did not need to be performed, then the given inputFile argument value is returned.
def __intermediate_interface(self, interface, uci_name): interface.update({ '.type': 'interface', '.name': uci_name, 'ifname': interface.pop('name') }) if 'network' in interface: del interface['network'] if 'mac' in interface: if interface.get('type') != 'wireless': interface['macaddr'] = interface['mac'] del interface['mac'] if 'autostart' in interface: interface['auto'] = interface['autostart'] del interface['autostart'] if 'disabled' in interface: interface['enabled'] = not interface['disabled'] del interface['disabled'] if 'wireless' in interface: del interface['wireless'] if 'addresses' in interface: del interface['addresses'] return interface
converts NetJSON interface to UCI intermediate data structure
def _parent_filter(self, parent, relationship, **kwargs):
    if parent is None or relationship is None:
        return {}
    parent_filter_kwargs = {}
    query_params = ((self._reverse_rel_name(relationship), parent),)
    parent_filter_kwargs['query'] = query_params
    if kwargs.get('workflow_job_template', None) is None:
        parent_data = self.read(pk=parent)['results'][0]
        parent_filter_kwargs['workflow_job_template'] = parent_data[
            'workflow_job_template']
    return parent_filter_kwargs
Returns filtering parameters to limit a search to the children of a particular node by a particular relationship.
def dir_maker(path):
    directory = os.path.dirname(path)
    if directory != '' and not os.path.isdir(directory):
        try:
            os.makedirs(directory)
        except OSError as e:
            sys.exit('Failed to create directory: {}'.format(e))
Create a directory if it does not exist.
def paragraphs(quantity=2, separator='\n\n', wrap_start='', wrap_end='',
               html=False, sentences_quantity=3, as_list=False):
    if html:
        wrap_start = '<p>'
        wrap_end = '</p>'
        separator = '\n\n'
    result = []
    try:
        for _ in xrange(0, quantity):
            result.append(wrap_start + sentences(sentences_quantity) + wrap_end)
    except NameError:
        for _ in range(0, quantity):
            result.append(wrap_start + sentences(sentences_quantity) + wrap_end)
    if as_list:
        return result
    else:
        return separator.join(result)
Return random paragraphs.
def build_SVG_dict(self): zoom = self._zoom layout = self._layout builder = self._builder bbox = list(map(lambda f: f * zoom, layout.bounding_box)) builder.bounding_box = bbox flip_x = bbox[2] + bbox[0] * 2 flip_y = bbox[3] + bbox[1] * 2 instructions = list(layout.walk_instructions( lambda i: (flip_x - (i.x + i.width) * zoom, flip_y - (i.y + i.height) * zoom, i.instruction))) instructions.sort(key=lambda x_y_i: x_y_i[2].render_z) for x, y, instruction in instructions: render_z = instruction.render_z z_id = ("" if not render_z else "-{}".format(render_z)) layer_id = "row-{}{}".format(instruction.row.id, z_id) def_id = self._register_instruction_in_defs(instruction) scale = self._symbol_id_to_scale[def_id] group = { "@class": "instruction", "@id": "instruction-{}".format(instruction.id), "@transform": "translate({},{}),scale({})".format( x, y, scale) } builder.place_svg_use(def_id, layer_id, group) builder.insert_defs(self._instruction_type_color_to_symbol.values()) return builder.get_svg_dict()
Go through the layout and build the SVG. :return: an xml dict that can be exported using a :class:`~knittingpattern.Dumper.XMLDumper` :rtype: dict
def dictDiff(da, db):
    different = False
    resultDict = dict()
    resultDict['inAButNotInB'] = set(da) - set(db)
    if resultDict['inAButNotInB']:
        different = True
    resultDict['inBButNotInA'] = set(db) - set(da)
    if resultDict['inBButNotInA']:
        different = True
    resultDict['differentValues'] = []
    for key in (set(da) - resultDict['inAButNotInB']):
        comparisonResult = da[key] == db[key]
        if isinstance(comparisonResult, bool):
            isEqual = comparisonResult
        else:
            isEqual = comparisonResult.all()
        if not isEqual:
            resultDict['differentValues'].append(key)
            different = True
    assert (((resultDict['inAButNotInB'] or resultDict['inBButNotInA'] or
              resultDict['differentValues']) and different) or not different)
    return resultDict if different else None
Compares two python dictionaries at the top level and return differences

da: first dictionary
db: second dictionary

Returns: None if dictionaries test equal; otherwise returns a dictionary
as follows:
{
  'inAButNotInB': <sequence of keys that are in da but not in db>
  'inBButNotInA': <sequence of keys that are in db but not in da>
  'differentValues': <sequence of keys whose corresponding values differ
                      between da and db>
}
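A short usage sketch with illustrative dictionaries:

da = {'a': 1, 'b': 2, 'c': 3}
db = {'b': 2, 'c': 4, 'd': 5}
print(dictDiff(da, db))
# -> {'inAButNotInB': {'a'}, 'inBButNotInA': {'d'}, 'differentValues': ['c']}
print(dictDiff(da, dict(da)))  # -> None, the dictionaries test equal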
def _close(self, conn):
    super(PooledAIODatabase, self)._close(conn)
    for waiter in self._waiters:
        if not waiter.done():
            logger.debug('Release a waiter')
            waiter.set_result(True)
            break
Release waiters.
def proto_02_01_MT70(abf=exampleABF):
    standard_overlayWithAverage(abf)
    swhlab.memtest.memtest(abf)
    swhlab.memtest.checkSweep(abf)
    swhlab.plot.save(abf, tag='check', resize=False)
repeated membrane tests.
def load_nipy_img(nii_file):
    import nipy
    if not os.path.exists(nii_file):
        raise FileNotFound(nii_file)
    try:
        return nipy.load_image(nii_file)
    except Exception as exc:
        raise Exception('Reading file {0}.'.format(repr_imgs(nii_file))) from exc
Read a Nifti file and return as nipy.Image

Parameters
----------
nii_file: str
    Nifti file path

Returns
-------
nipy.Image
def payload_class_for_element_name(element_name):
    logger.debug(" looking up payload class for element: {0!r}".format(
        element_name))
    logger.debug(" known: {0!r}".format(STANZA_PAYLOAD_CLASSES))
    if element_name in STANZA_PAYLOAD_CLASSES:
        return STANZA_PAYLOAD_CLASSES[element_name]
    else:
        return XMLPayload
Return a payload class for given element name.
def _get_account_and_descendants_(self, account, result):
    result.append(account)
    for child in account.accounts:
        self._get_account_and_descendants_(child, result)
Returns the account and all of its sub accounts.

:param account: The account.
:param result: The list to add all the accounts to.
def downsample(self, factor=2):
    if not isinstance(factor, int) or factor < 1:
        raise ValueError('factor must be a positive integer.')
    effect_args = ['downsample', '{}'.format(factor)]
    self.effects.extend(effect_args)
    self.effects_log.append('downsample')
    return self
Downsample the signal by an integer factor. Only the first out of each
factor samples is retained, the others are discarded.

No decimation filter is applied. If the input is not a properly bandlimited
baseband signal, aliasing will occur. This may be desirable e.g., for
frequency translation. For a general resampling effect with anti-aliasing,
see rate.

Parameters
----------
factor : int, default=2
    Downsampling factor.

See Also
--------
rate, upsample
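A short usage sketch, assuming this is a method on a sox-style Transformer whose effect chain is later rendered with build(); the file names are hypothetical:

tfm = Transformer()
tfm.downsample(factor=4)                   # keep every 4th sample; no anti-alias filter
tfm.build('input.wav', 'downsampled.wav')  # render the accumulated effect chain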
def deploy(file, manager_path, check, dry_run): config = read_deployment_config(file) manager = DeployManager(config=config, filepath=file, manager_path=manager_path, dry_run=dry_run) exception = None if check: manager.check() Printer.print_success('Polyaxon deployment file is valid.') else: try: manager.install() except Exception as e: Printer.print_error('Polyaxon could not be installed.') exception = e if exception: Printer.print_error('Error message `{}`.'.format(exception))
Deploy polyaxon.
def iso_reference_valid_char(c, raise_error=True):
    if c in ISO_REFERENCE_VALID:
        return True
    if raise_error:
        raise ValueError("'%s' is not in '%s'" % (c, ISO_REFERENCE_VALID))
    return False
Helper to make sure the given character is valid for a reference number
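A brief usage sketch (assuming ordinary digits belong to ISO_REFERENCE_VALID, which a reference-number alphabet would normally include):

iso_reference_valid_char('7', raise_error=False)   # -> True, if '7' is in ISO_REFERENCE_VALID
iso_reference_valid_char('!', raise_error=False)   # -> False instead of raising
iso_reference_valid_char('!')                      # raises ValueError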
def load(self, clear=False): if clear: self.settings = {} defer = [] for conf in pkg_resources.iter_entry_points('pyconfig'): if conf.attrs: raise RuntimeError("config must be a module") mod_name = conf.module_name base_name = conf.name if conf.name != 'any' else None log.info("Loading module '%s'", mod_name) mod_dict = runpy.run_module(mod_name) if mod_dict.get('deferred', None) is deferred: log.info("Deferring module '%s'", mod_name) mod_dict.pop('deferred') defer.append((mod_name, base_name, mod_dict)) continue self._update(mod_dict, base_name) for mod_name, base_name, mod_dict in defer: log.info("Loading deferred module '%s'", mod_name) self._update(mod_dict, base_name) if etcd().configured: mod_dict = etcd().load() if mod_dict: self._update(mod_dict) mod_dict = None try: mod_dict = runpy.run_module('localconfig') except ImportError: pass except ValueError as err: if getattr(err, 'message') != '__package__ set to non-string': raise mod_name = 'localconfig' if sys.version_info < (2, 7): loader, code, fname = runpy._get_module_details(mod_name) else: _, loader, code, fname = runpy._get_module_details(mod_name) mod_dict = runpy._run_code(code, {}, {}, mod_name, fname, loader, pkg_name=None) if mod_dict: log.info("Loading module 'localconfig'") self._update(mod_dict) self.call_reload_hooks()
Loads all the config plugin modules to build a working configuration. If there is a ``localconfig`` module on the python path, it will be loaded last, overriding other settings. :param bool clear: Clear out the previous settings before loading
def delete(self, pk=None, fail_on_missing=False, **kwargs):
    self._separate(kwargs)
    return super(Resource, self).delete(
        pk=pk, fail_on_missing=fail_on_missing, **kwargs)
Remove the given notification template. Note here configuration-related fields like 'notification_configuration' and 'channels' will not be used even provided. If `fail_on_missing` is True, then the object's not being found is considered a failure; otherwise, a success with no change is reported. =====API DOCS===== Remove the given object. :param pk: Primary key of the resource to be deleted. :type pk: int :param fail_on_missing: Flag that if set, the object's not being found is considered a failure; otherwise, a success with no change is reported. :type fail_on_missing: bool :param `**kwargs`: Keyword arguments used to look up resource object to delete if ``pk`` is not provided. :returns: dictionary of only one field "changed", which is a flag indicating whether the specified resource is successfully deleted. :rtype: dict =====API DOCS=====
def get(self, q=None, page=None):
    etag = generate_etag(current_ext.content_version.encode('utf8'))
    self.check_etag(etag, weak=True)
    res = jsonify(current_ext.styles)
    res.set_etag(etag)
    return res
Get styles.
def init_raspbian_disk(self, yes=0): self.assume_localhost() yes = int(yes) device_question = 'SD card present at %s? ' % self.env.sd_device if not yes and not raw_input(device_question).lower().startswith('y'): return r = self.local_renderer r.local_if_missing( fn='{raspbian_image_zip}', cmd='wget {raspbian_download_url} -O raspbian_lite_latest.zip') r.lenv.img_fn = \ r.local("unzip -l {raspbian_image_zip} | sed -n 4p | awk '{{print $4}}'", capture=True) or '$IMG_FN' r.local('echo {img_fn}') r.local('[ ! -f {img_fn} ] && unzip {raspbian_image_zip} {img_fn} || true') r.lenv.img_fn = r.local('readlink -f {img_fn}', capture=True) r.local('echo {img_fn}') with self.settings(warn_only=True): r.sudo('[ -d "{sd_media_mount_dir}" ] && umount {sd_media_mount_dir} || true') with self.settings(warn_only=True): r.sudo('[ -d "{sd_media_mount_dir2}" ] && umount {sd_media_mount_dir2} || true') r.pc('Writing the image onto the card.') r.sudo('time dd bs=4M if={img_fn} of={sd_device}') r.run('sync')
Downloads the latest Raspbian image and writes it to a microSD card. Based on the instructions from: https://www.raspberrypi.org/documentation/installation/installing-images/linux.md
def count_seeds(usort):
    with open(usort, 'r') as insort:
        cmd1 = ["cut", "-f", "2"]
        cmd2 = ["uniq"]
        cmd3 = ["wc"]
        proc1 = sps.Popen(cmd1, stdin=insort, stdout=sps.PIPE, close_fds=True)
        proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=sps.PIPE, close_fds=True)
        proc3 = sps.Popen(cmd3, stdin=proc2.stdout, stdout=sps.PIPE, close_fds=True)
        res = proc3.communicate()
        nseeds = int(res[0].split()[0])
        proc1.stdout.close()
        proc2.stdout.close()
        proc3.stdout.close()
    return nseeds
uses bash commands to quickly count N seeds from utemp file
def example(self, relative_path):
    example_path = os.path.join("examples", relative_path)
    return self.relative_file(__file__, example_path)
Load an example from the knitting pattern examples.

:param str relative_path: the path to load
:return: the result of the processing

You can use :meth:`knittingpattern.Loader.PathLoader.examples` to find out
the paths of all examples.
def add_child(self, child):
    if not isinstance(child, ChildMixin):
        raise TypeError(
            'Requires instance of TreeElement. '
            'Got {}'.format(type(child))
        )
    child.parent = self
    self._children.append(child)
If the given object is an instance of Child add it to self and register self as a parent.
def rootdir(self, username, reponame, create=True):
    path = os.path.join(self.workspace, 'datasets', username, reponame)
    if create:
        try:
            os.makedirs(path)
        except:
            pass
    return path
Working directory for the repo
def configure_camera(self): r = self.local_renderer if self.env.camera_enabled: r.pc('Enabling camera.') r.enable_attr( filename='/boot/config.txt', key='start_x', value=1, use_sudo=True, ) r.enable_attr( filename='/boot/config.txt', key='gpu_mem', value=r.env.gpu_mem, use_sudo=True, ) r.run('cd ~; git clone https://github.com/raspberrypi/userland.git; cd userland; ./buildme') r.run('touch ~/.bash_aliases') r.append(r'PATH=$PATH:/opt/vc/bin\nexport PATH', '~/.bash_aliases') r.append(r'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\nexport LD_LIBRARY_PATH', '~/.bash_aliases') r.run('source ~/.bashrc') r.sudo('ldconfig') r.sudo("echo 'SUBSYSTEM==\"vchiq\",GROUP=\"video\",MODE=\"0660\"' > /etc/udev/rules.d/10-vchiq-permissions.rules") r.sudo("usermod -a -G video {user}") r.reboot(wait=300, timeout=60) self.test_camera() else: r.disable_attr( filename='/boot/config.txt', key='start_x', use_sudo=True, ) r.disable_attr( filename='/boot/config.txt', key='gpu_mem', use_sudo=True, ) r.reboot(wait=300, timeout=60)
Enables access to the camera.

    http://raspberrypi.stackexchange.com/questions/14229/how-can-i-enable-the-camera-without-using-raspi-config
    https://mike632t.wordpress.com/2014/06/26/raspberry-pi-camera-setup/

Afterwards, test with:

    /opt/vc/bin/raspistill --nopreview --output image.jpg

Check for compatibility with:

    vcgencmd get_camera

which should show:

    supported=1 detected=1
def copy(self, pk=None, new_name=None, **kwargs): orig = self.read(pk, fail_on_no_results=True, fail_on_multiple_results=True) orig = orig['results'][0] self._pop_none(kwargs) newresource = copy(orig) newresource.pop('id') basename = newresource['name'].split('@', 1)[0].strip() for field in self.fields: if field.multiple and field.name in newresource: newresource[field.name] = (newresource.get(field.name),) if new_name is None: newresource['name'] = "%s @ %s" % (basename, time.strftime('%X')) newresource.update(kwargs) return self.write(create_on_missing=True, fail_on_found=True, **newresource) else: if kwargs: raise exc.TowerCLIError('Cannot override {} and also use --new-name.'.format(kwargs.keys())) copy_endpoint = '{}/{}/copy/'.format(self.endpoint.strip('/'), pk) return client.post(copy_endpoint, data={'name': new_name}).json()
Copy an object. Only the ID is used for the lookup. All provided fields are used to override the old data from the copied resource. =====API DOCS===== Copy an object. :param pk: Primary key of the resource object to be copied :param new_name: The new name to give the resource if deep copying via the API :type pk: int :param `**kwargs`: Keyword arguments of fields whose given value will override the original value. :returns: loaded JSON of the copied new resource object. :rtype: dict =====API DOCS=====
def SPI(ledtype=None, num=0, **kwargs):
    from ...project.types.ledtype import make
    if ledtype is None:
        raise ValueError('Must provide ledtype value!')
    ledtype = make(ledtype)
    if num == 0:
        raise ValueError('Must provide num value >0!')
    if ledtype not in SPI_DRIVERS.keys():
        raise ValueError('{} is not a valid LED type.'.format(ledtype))
    return SPI_DRIVERS[ledtype](num, **kwargs)
Wrapper function for using SPI device drivers on systems like the Raspberry Pi and BeagleBone. This allows using any of the SPI drivers from a single entry point instead importing the driver for a specific LED type. Provides the same parameters of :py:class:`bibliopixel.drivers.SPI.SPIBase` as well as those below: :param ledtype: One of: LPD8806, WS2801, WS281X, or APA102
def callback(self, event): if event.mask == 0x00000008: if event.name.endswith('.json'): print_success("Ldapdomaindump file found") if event.name in ['domain_groups.json', 'domain_users.json']: if event.name == 'domain_groups.json': self.domain_groups_file = event.pathname if event.name == 'domain_users.json': self.domain_users_file = event.pathname if self.domain_groups_file and self.domain_users_file: print_success("Importing users") subprocess.Popen(['jk-import-domaindump', self.domain_groups_file, self.domain_users_file]) elif event.name == 'domain_computers.json': print_success("Importing computers") subprocess.Popen(['jk-import-domaindump', event.pathname]) self.ldap_strings = [] self.write_targets() if event.name.endswith('_samhashes.sam'): host = event.name.replace('_samhashes.sam', '') print_success("Secretsdump file, host ip: {}".format(host)) subprocess.Popen(['jk-import-secretsdump', event.pathname]) self.ips.remove(host) self.write_targets()
Function that gets called on each event from pyinotify.
def streaming_to_client():
    for handler in client_logger.handlers:
        if hasattr(handler, 'append_newlines'):
            break
    else:
        handler = None
    old_propagate = client_logger.propagate
    client_logger.propagate = False
    if handler is not None:
        old_append = handler.append_newlines
        handler.append_newlines = False
    yield
    client_logger.propagate = old_propagate
    if handler is not None:
        handler.append_newlines = old_append
Puts the client logger into streaming mode, which sends unbuffered input through to the socket one character at a time. We also disable propagation so the root logger does not receive many one-byte emissions. This context handler was originally created for streaming Compose up's terminal output through to the client and should only be used for similarly complex circumstances.
def add_annotation_date(self, doc, annotation_date):
    if len(doc.annotations) != 0:
        if not self.annotation_date_set:
            self.annotation_date_set = True
            date = utils.datetime_from_iso_format(annotation_date)
            if date is not None:
                doc.annotations[-1].annotation_date = date
                return True
            else:
                raise SPDXValueError('Annotation::AnnotationDate')
        else:
            raise CardinalityError('Annotation::AnnotationDate')
    else:
        raise OrderError('Annotation::AnnotationDate')
Sets the annotation date. Raises CardinalityError if already set. OrderError if no annotator defined before. Raises SPDXValueError if invalid value.
def plot_border(mask, should_plot_border, units, kpc_per_arcsec, pointsize, zoom_offset_pixels):
    if should_plot_border and mask is not None:
        plt.gca()
        border_pixels = mask.masked_grid_index_to_pixel[mask.border_pixels]
        if zoom_offset_pixels is not None:
            border_pixels -= zoom_offset_pixels
        border_arcsec = mask.grid_pixels_to_grid_arcsec(grid_pixels=border_pixels)
        border_units = convert_grid_units(array=mask, grid_arcsec=border_arcsec,
                                          units=units, kpc_per_arcsec=kpc_per_arcsec)
        plt.scatter(y=border_units[:, 0], x=border_units[:, 1], s=pointsize, c='y')
Plot the borders of the mask or the array on the figure.

Parameters
----------
mask : ndarray of data.array.mask.Mask
    The mask applied to the array, the edge of which is plotted as a set of
    points over the plotted array.
should_plot_border : bool
    If a mask is supplied, its border pixels (e.g. the exterior edge) are
    plotted if this is *True*.
units : str
    The units of the y / x axis of the plots, in arc-seconds ('arcsec') or
    kiloparsecs ('kpc').
kpc_per_arcsec : float or None
    The conversion factor between arc-seconds and kiloparsecs, required to
    plot the units in kpc.
pointsize : int
    The size of the points plotted to show the borders.
def add_annotation_comment(self, doc, comment):
    if len(doc.annotations) != 0:
        if not self.annotation_comment_set:
            self.annotation_comment_set = True
            doc.annotations[-1].comment = comment
            return True
        else:
            raise CardinalityError('AnnotationComment')
    else:
        raise OrderError('AnnotationComment')
Sets the annotation comment. Raises CardinalityError if already set. OrderError if no annotator defined before.
def copy(self, space=None, name=None):
    return Cells(space=space, name=name, formula=self.formula)
Make a copy of itself and return it.
def groupify(function):
    @wraps(function)
    def wrapper(paths, *args, **kwargs):
        groups = {}
        for path in paths:
            key = function(path, *args, **kwargs)
            if key is not None:
                groups.setdefault(key, set()).add(path)
        return groups
    return wrapper
Decorator to convert a function which takes a single value and returns a key
into one which takes a list of values and returns a dict of key-group
mappings.

:param function: A function which takes a value and returns a hash key.
:type function: ``function(value) -> key``
:rtype:
    .. parsed-literal::
        function(iterable) -> {key: :class:`~__builtins__.set` ([value, ...]), ...}
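A short usage sketch with a hypothetical key function that groups file paths by extension:

import os

@groupify
def by_extension(path):
    # Return the extension as the group key; None skips paths without one.
    ext = os.path.splitext(path)[1]
    return ext or None

by_extension(['a.py', 'b.py', 'notes.txt', 'README'])
# -> {'.py': {'a.py', 'b.py'}, '.txt': {'notes.txt'}}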
def update(self):
    bulbs = self._hub.get_lights()
    if not bulbs:
        _LOGGER.debug("%s is offline, send command failed", self._zid)
        self._online = False
Update light objects to their current values.
def query_by_user(cls, user, with_pending=False, eager=False):
    q1 = Group.query.join(Membership).filter_by(user_id=user.get_id())
    if not with_pending:
        q1 = q1.filter_by(state=MembershipState.ACTIVE)
    if eager:
        q1 = q1.options(joinedload(Group.members))
    q2 = Group.query.join(GroupAdmin).filter_by(
        admin_id=user.get_id(), admin_type=resolve_admin_type(user))
    if eager:
        q2 = q2.options(joinedload(Group.members))
    query = q1.union(q2).with_entities(Group.id)
    return Group.query.filter(Group.id.in_(query))
Query group by user.

:param user: User object.
:param bool with_pending: Whether to include pending users.
:param bool eager: Eagerly fetch group members.
:returns: Query object.
def contact(request):
    form = ContactForm(request.POST or None)
    if form.is_valid():
        subject = form.cleaned_data['subject']
        message = form.cleaned_data['message']
        sender = form.cleaned_data['sender']
        cc_myself = form.cleaned_data['cc_myself']
        recipients = settings.CONTACTFORM_RECIPIENTS
        if cc_myself:
            recipients.append(sender)
        send_mail(getattr(settings, "CONTACTFORM_SUBJECT_PREFIX", '') + subject,
                  message, sender, recipients)
        return render(request, 'contactform/thanks.html')
    return render(request, 'contactform/contact.html', {'form': form})
Displays the contact form and sends the email
def run_model(model, returns_train, returns_test=None, bmark=None, samples=500, ppc=False, progressbar=True): if model == 'alpha_beta': model, trace = model_returns_t_alpha_beta(returns_train, bmark, samples, progressbar=progressbar) elif model == 't': model, trace = model_returns_t(returns_train, samples, progressbar=progressbar) elif model == 'normal': model, trace = model_returns_normal(returns_train, samples, progressbar=progressbar) elif model == 'best': model, trace = model_best(returns_train, returns_test, samples=samples, progressbar=progressbar) else: raise NotImplementedError( 'Model {} not found.' 'Use alpha_beta, t, normal, or best.'.format(model)) if ppc: ppc_samples = pm.sample_ppc(trace, samples=samples, model=model, size=len(returns_test), progressbar=progressbar) return trace, ppc_samples['returns'] return trace
Run one of the Bayesian models. Parameters ---------- model : {'alpha_beta', 't', 'normal', 'best'} Which model to run returns_train : pd.Series Timeseries of simple returns returns_test : pd.Series (optional) Out-of-sample returns. Datetimes in returns_test will be added to returns_train as missing values and predictions will be generated for them. bmark : pd.Series or pd.DataFrame (optional) Only used for alpha_beta to estimate regression coefficients. If bmark has more recent returns than returns_train, these dates will be treated as missing values and predictions will be generated for them taking market correlations into account. samples : int (optional) Number of posterior samples to draw. ppc : boolean (optional) Whether to run a posterior predictive check. Will generate samples of length returns_test. Returns a second argument that contains the PPC of shape samples x len(returns_test). Returns ------- trace : pymc3.sampling.BaseTrace object A PyMC3 trace object that contains samples for each parameter of the posterior. ppc : numpy.array (if ppc==True) PPC of shape samples x len(returns_test).
def KeyboardInput(wVk: int, wScan: int, dwFlags: int = KeyboardEventFlag.KeyDown,
                  time_: int = 0) -> INPUT:
    return _CreateInput(KEYBDINPUT(wVk, wScan, dwFlags, time_, None))
Create Win32 struct `KEYBDINPUT` for `SendInput`.
def get_annotation_type(self, r_term):
    for _, _, typ in self.graph.triples((
            r_term, self.spdx_namespace['annotationType'], None)):
        if typ is not None:
            return typ
        else:
            self.error = True
            msg = 'Annotation must have exactly one annotation type.'
            self.logger.log(msg)
            return
Returns annotation type or None if found none or more than one. Reports errors on failure.
def is_repository_file(self, relativePath): relativePath = self.to_repo_relative_path(path=relativePath, split=False) if relativePath == '': return False, False, False, False relaDir, name = os.path.split(relativePath) fileOnDisk = os.path.isfile(os.path.join(self.__path, relativePath)) infoOnDisk = os.path.isfile(os.path.join(self.__path,os.path.dirname(relativePath),self.__fileInfo%name)) classOnDisk = os.path.isfile(os.path.join(self.__path,os.path.dirname(relativePath),self.__fileClass%name)) cDir = self.__repo['walk_repo'] if len(relaDir): for dirname in relaDir.split(os.sep): dList = [d for d in cDir if isinstance(d, dict)] if not len(dList): cDir = None break cDict = [d for d in dList if dirname in d] if not len(cDict): cDir = None break cDir = cDict[0][dirname] if cDir is None: return False, fileOnDisk, infoOnDisk, classOnDisk if str(name) not in [str(i) for i in cDir]: return False, fileOnDisk, infoOnDisk, classOnDisk return True, fileOnDisk, infoOnDisk, classOnDisk
Check whether a given relative path is a repository file path :Parameters: #. relativePath (string): File relative path :Returns: #. isRepoFile (boolean): Whether file is a repository file. #. isFileOnDisk (boolean): Whether file is found on disk. #. isFileInfoOnDisk (boolean): Whether file info is found on disk. #. isFileClassOnDisk (boolean): Whether file class is found on disk.
def find(self, s):
    pSet = [s]
    parent = self._leader[s]
    while parent != self._leader[parent]:
        pSet.append(parent)
        parent = self._leader[parent]
    if len(pSet) > 1:
        for a in pSet:
            self._leader[a] = parent
    return parent
Locates the leader of the set to which the element ``s`` belongs.

Parameters
----------
s : object
    An object that the ``UnionFind`` contains.

Returns
-------
object
    The leader of the set that contains ``s``.
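A small usage sketch (the UnionFind constructor and a companion union method are assumed to exist; only find is shown above):

uf = UnionFind(['a', 'b', 'c'])
uf.union('a', 'b')
uf.find('a') == uf.find('b')   # -> True: both elements now report the same leader
uf.find('c')                   # -> 'c' remains its own leader until merged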
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None): if len_filter is not None: trX, trY = len_filter.filter(trX, trY) trY = standardize_targets(trY, cost=self.cost) n = 0. t = time() costs = [] for e in range(n_epochs): epoch_costs = [] for xmb, ymb in self.iterator.iterXY(trX, trY): c = self._train(xmb, ymb) epoch_costs.append(c) n += len(ymb) if self.verbose >= 2: n_per_sec = n / (time() - t) n_left = len(trY) - n % len(trY) time_left = n_left/n_per_sec sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time_left)) sys.stdout.flush() costs.extend(epoch_costs) status = "Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time() - t) if self.verbose >= 2: sys.stdout.write("\r"+status) sys.stdout.flush() sys.stdout.write("\n") elif self.verbose == 1: print(status) if path and e % snapshot_freq == 0: save(self, "{0}.{1}".format(path, e)) return costs
Train model on given training examples and return the list of costs after each minibatch is processed. Args: trX (list) -- Inputs trY (list) -- Outputs batch_size (int, optional) -- number of examples in a minibatch (default 64) n_epochs (int, optional) -- number of epochs to train for (default 1) len_filter (object, optional) -- object to filter training example by length (default LenFilter()) snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1) path (str, optional) -- prefix of path where model snapshots are saved. If None, no snapshots are saved (default None) Returns: list -- costs of model after processing each minibatch
def trans(ele, standard=False):
    try:
        node = globals().get(ele['type'])
        if not node:
            raise NotImplementedError('%s is not supported!' % ele['type'])
        if standard:
            node = node.__dict__[
                'standard'] if 'standard' in node.__dict__ else node
        return node(**ele)
    except:
        raise
Translates esprima syntax tree to python by delegating to appropriate translating node
def filter_excluded_tags(self, all_tags):
    filtered_tags = copy.deepcopy(all_tags)
    if self.options.exclude_tags:
        filtered_tags = self.apply_exclude_tags(filtered_tags)
    if self.options.exclude_tags_regex:
        filtered_tags = self.apply_exclude_tags_regex(filtered_tags)
    return filtered_tags
Filter tags according to the exclude_tags and exclude_tags_regex options.

:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
def _generate(self):
    candidates = np.array(range(self._n), np.uint32)
    for i in xrange(self._num):
        self._random.shuffle(candidates)
        pattern = candidates[0:self._getW()]
        self._patterns[i] = set(pattern)
Generates set of random patterns.
def filter_queryset(self, request, queryset, view):
    if request.user.is_staff:
        email = request.query_params.get('email', None)
        username = request.query_params.get('username', None)
        query_parameters = {}
        if email:
            query_parameters.update(email=email)
        if username:
            query_parameters.update(username=username)
        if query_parameters:
            users = User.objects.filter(**query_parameters).values_list('id', flat=True)
            queryset = queryset.filter(user_id__in=users)
    else:
        queryset = queryset.filter(user_id=request.user.id)
    return queryset
Apply incoming filters only if user is staff. If not, only filter by user's ID.
def messages(self):
    method = 'GET'
    url = 'activeMessage'
    rc = self.__request__(method, url)
    return rc['activeMessage']
Return active messages.
def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=None): lines = (['// marker so line numbers and indices both start at 1'] + lines + ['// marker so line numbers end in a known way']) include_state = _IncludeState() function_state = _FunctionState() nesting_state = NestingState() ResetNolintSuppressions() CheckForCopyright(filename, lines, error) ProcessGlobalSuppresions(lines) RemoveMultiLineComments(filename, lines, error) clean_lines = CleansedLines(lines) if file_extension in GetHeaderExtensions(): CheckForHeaderGuard(filename, clean_lines, error) for line in range(clean_lines.NumLines()): ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions) FlagCxx11Features(filename, clean_lines, line, error) nesting_state.CheckCompletedBlocks(filename, error) CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) if _IsSourceExtension(file_extension): CheckHeaderFileIncluded(filename, include_state, error) CheckForBadCharacters(filename, lines, error) CheckForNewlineAtEOF(filename, lines, error)
Performs lint checks and reports any errors to the given error function. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error
def legal_status(self):
    if self.__legal_status:
        return self.__legal_status
    else:
        self.__legal_status = legal_status(self.CAS, Method='COMBINED')
        return self.__legal_status
Dictionary of legal status indicators for the chemical. Examples -------- >>> pprint(Chemical('benzene').legal_status) {'DSL': 'LISTED', 'EINECS': 'LISTED', 'NLP': 'UNLISTED', 'SPIN': 'LISTED', 'TSCA': 'LISTED'}
def preprocess_dict(d):
    out_env = {}
    for k, v in d.items():
        if type(v) not in PREPROCESSORS:
            raise KeyError('Invalid type in dict: {}'.format(type(v)))
        out_env[k] = PREPROCESSORS[type(v)](v)
    return out_env
Preprocess a dict to be used as environment variables. :param d: dict to be processed
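A sketch of what such a PREPROCESSORS table and its use could look like; the converters registered here are assumptions, and the real library's table may differ:

PREPROCESSORS = {
    str: lambda v: v,
    int: lambda v: str(v),
    bool: lambda v: 'true' if v else 'false',
}

def preprocess_dict_sketch(d):
    out_env = {}
    for k, v in d.items():
        if type(v) not in PREPROCESSORS:
            raise KeyError('Invalid type in dict: {}'.format(type(v)))
        out_env[k] = PREPROCESSORS[type(v)](v)
    return out_env

print(preprocess_dict_sketch({'PORT': 8080, 'DEBUG': True}))  # {'PORT': '8080', 'DEBUG': 'true'}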
def permalink(self, repo, path):
    if not os.path.exists(path):
        return (None, None)

    # Find the repository root and the path of the file relative to it.
    cwd = os.getcwd()
    if os.path.isfile(path):
        os.chdir(os.path.dirname(path))
    rootdir = self._run(["rev-parse", "--show-toplevel"])
    if "fatal" in rootdir:
        return (None, None)
    os.chdir(rootdir)
    relpath = os.path.relpath(path, rootdir)

    # Latest commit that touched the file, plus the configured remote URL.
    sha1 = self._run(["log", "-n", "1", "--format=format:%H", relpath])
    remoteurl = self._run(["config", "--get", "remote.origin.url"])
    os.chdir(cwd)

    # Support both ssh and https remote URLs.
    m = re.search('^git@([^:\/]+):([^/]+)/([^/]+)', remoteurl)
    if m is None:
        m = re.search('^https://([^:/]+)/([^/]+)/([^/]+)', remoteurl)
    if m is not None:
        domain = m.group(1)
        username = m.group(2)
        project = m.group(3)
        if project.endswith(".git"):
            project = project[:-4]
        permalink = "https://{}/{}/{}/blob/{}/{}".format(domain, username, project,
                                                         sha1, relpath)
        return (relpath, permalink)
    else:
        return (None, None)
Get the permalink to the command that generated the dataset.
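The remote-URL parsing step can be exercised on its own; the repository URL, commit hash, and file path below are made-up examples:

import re

remoteurl = "git@github.com:someuser/somerepo.git"
m = re.search(r'^git@([^:/]+):([^/]+)/([^/]+)', remoteurl)
if m is None:
    m = re.search(r'^https://([^:/]+)/([^/]+)/([^/]+)', remoteurl)
domain, username, project = m.group(1), m.group(2), m.group(3)
if project.endswith(".git"):
    project = project[:-4]
print("https://{}/{}/{}/blob/{}/{}".format(domain, username, project, "abc123", "data/file.csv"))
# https://github.com/someuser/somerepo/blob/abc123/data/file.csv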
def _learnBacktrack(self):
    numPrevPatterns = len(self._prevLrnPatterns) - 1
    if numPrevPatterns <= 0:
        if self.verbosity >= 3:
            print "lrnBacktrack: No available history to backtrack from"
        return False

    # Walk backwards through history, looking for an offset we can lock onto.
    badPatterns = []
    inSequence = False
    for startOffset in range(0, numPrevPatterns):
        inSequence = self._learnBacktrackFrom(startOffset, readOnly=True)
        if inSequence:
            break
        badPatterns.append(startOffset)

    if not inSequence:
        if self.verbosity >= 3:
            print ("Failed to lock on. Falling back to start cells on current "
                   "time step.")
        self._prevLrnPatterns = []
        return False

    if self.verbosity >= 3:
        print ("Discovered path to current input by using start cells from %d "
               "steps ago:" % (numPrevPatterns - startOffset),
               self._prevLrnPatterns[startOffset])

    # Replay from the chosen offset, this time updating learning state.
    self._learnBacktrackFrom(startOffset, readOnly=False)

    # Drop history entries that are no longer useful.
    for i in range(numPrevPatterns):
        if i in badPatterns or i <= startOffset:
            if self.verbosity >= 3:
                print ("Removing useless pattern from history:",
                       self._prevLrnPatterns[0])
            self._prevLrnPatterns.pop(0)
        else:
            break

    return numPrevPatterns - startOffset
This "backtracks" our learning state, trying to see if we can lock onto the current set of inputs by assuming the sequence started up to N steps ago on start cells. This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a sequence that started earlier. :returns: >0 if we managed to lock on to a sequence that started earlier. The value returned is how many steps in the past we locked on. If 0 is returned, the caller needs to change active state to start on start cells. How it works: ------------------------------------------------------------------- This method gets called from updateLearningState when we detect either of the following two conditions: #. Our PAM counter (@ref pamCounter) expired #. We reached the max allowed learned sequence length Either of these two conditions indicate that we want to start over on start cells. Rather than start over on start cells on the current input, we can accelerate learning by backtracking a few steps ago and seeing if perhaps a sequence we already at least partially know already started. This updates/modifies: - @ref lrnActiveState['t'] This trashes: - @ref lrnActiveState['t-1'] - @ref lrnPredictedState['t'] - @ref lrnPredictedState['t-1']
def annotate_metadata_action(repo):
    package = repo.package
    print("Including history of actions")
    with cd(repo.rootdir):
        filename = ".dgit/log.json"
        if os.path.exists(filename):
            history = open(filename).readlines()
            actions = []
            for a in history:
                try:
                    a = json.loads(a)
                    for x in ['code']:
                        if x not in a or a[x] is None:
                            a[x] = "..."
                    actions.append(a)
                except:
                    pass
            package['actions'] = actions
Update metadata with the action history
def dispatch(self, message, source=None):
    msgtype = ""
    try:
        if type(message[0]) == str:
            # Single message: look up the callback registered for this address.
            address = message[0]
            self.callbacks[address](message)
        elif type(message[0]) == list:
            # Bundle: recursively dispatch each contained message.
            for msg in message:
                self.dispatch(msg)
    except KeyError, key:
        print 'address %s not found, %s: %s' % (address, key, message)
        pprint.pprint(message)
    except IndexError, e:
        print '%s: %s' % (e, message)
    except None, e:
        print "Exception in", address, "callback :", e
    return
Sends decoded OSC data to an appropriate callback.
def all(self):
    response = self.api.get(url=PATHS['GET_PROFILES'])
    for raw_profile in response:
        self.append(Profile(self.api, raw_profile))
    return self
Get all social network profiles.
def duplicated_rows(df, col_name):
    _check_cols(df, [col_name])
    dups = df[pd.notnull(df[col_name]) & df.duplicated(subset=[col_name])]
    return dups
Return a DataFrame with the duplicated values of the column `col_name` in `df`.
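duplicated_rows also validates the column via the private _check_cols helper, so the core pandas expression is easier to try standalone on a toy frame:

import pandas as pd

df = pd.DataFrame({'email': ['a@x.com', 'b@x.com', 'a@x.com', None, None]})
# Keep rows that are duplicates of an earlier non-null value in 'email'.
dups = df[pd.notnull(df['email']) & df.duplicated(subset=['email'])]
print(dups)  # only the second 'a@x.com' row; null values are ignored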
def validate_params(required, optional, params):
    missing_fields = [x for x in required if x not in params]
    if missing_fields:
        field_strings = ", ".join(missing_fields)
        raise Exception("Missing fields: %s" % field_strings)
    disallowed_fields = [x for x in params if x not in optional and x not in required]
    if disallowed_fields:
        field_strings = ", ".join(disallowed_fields)
        raise Exception("Disallowed fields: %s" % field_strings)
Helps us validate the parameters for the request :param required: a list of field names that must be present in the request :param optional: a list of field names that may be present in the request :param params: a dict whose keys tell us what the user is sending in the API request :returns: None, or raises an exception if the validation fails
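A usage sketch, assuming validate_params above is in scope; the field names are made up:

required = ['to', 'subject']
optional = ['cc', 'bcc']

validate_params(required, optional, {'to': 'a@x.com', 'subject': 'hi', 'cc': 'b@x.com'})  # passes silently

try:
    validate_params(required, optional, {'to': 'a@x.com'})
except Exception as exc:
    print(exc)  # Missing fields: subject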
def reload_cache_config(self, call_params):
    path = '/' + self.api_version + '/ReloadCacheConfig/'
    method = 'POST'
    return self.request(path, method, call_params)
REST Reload Plivo Cache Config helper
def save_stream(self, key, binary=False):
    s = io.BytesIO() if binary else io.StringIO()
    yield s
    self.save_value(key, s.getvalue())
Return a managed file-like object into which the calling code can write arbitrary data. :param key: key under which the written data is saved when the context exits :param binary: if True, a bytes stream is yielded instead of a text stream :return: A managed stream-like object
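Because save_stream is written as a generator, it is presumably wrapped with contextlib.contextmanager by the surrounding class. A self-contained mock of the pattern; the InMemoryStore class is an assumption, not the library's real store:

import io
from contextlib import contextmanager

class InMemoryStore(object):
    # Hypothetical stand-in for whatever store defines save_value/save_stream.
    def __init__(self):
        self.values = {}

    def save_value(self, key, value):
        self.values[key] = value

    @contextmanager
    def save_stream(self, key, binary=False):
        s = io.BytesIO() if binary else io.StringIO()
        yield s
        self.save_value(key, s.getvalue())

store = InMemoryStore()
with store.save_stream('report.txt') as fh:
    fh.write(u'hello')
print(store.values['report.txt'])  # hello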
def pickle_save(thing, fname):
    pickle.dump(thing, open(fname, "wb"), pickle.HIGHEST_PROTOCOL)
    return thing
save something to a pickle file
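A round-trip sketch using the standard library directly; the data and the temp filename are arbitrary:

import os
import pickle
import tempfile

data = {'weights': [0.1, 0.2, 0.3]}
fname = os.path.join(tempfile.gettempdir(), 'demo.pkl')
pickle.dump(data, open(fname, 'wb'), pickle.HIGHEST_PROTOCOL)  # what pickle_save does
print(pickle.load(open(fname, 'rb')))                          # {'weights': [0.1, 0.2, 0.3]}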
def complex_has_member(graph: BELGraph, complex_node: ComplexAbundance, member_node: BaseEntity) -> bool:
    return any(
        v == member_node
        for _, v, data in graph.out_edges(complex_node, data=True)
        if data[RELATION] == HAS_COMPONENT
    )
Does the given complex contain the member?
def flip_axis_multi(x, axis, is_random=False):
    if is_random:
        factor = np.random.uniform(-1, 1)
        if factor > 0:
            results = []
            for data in x:
                data = np.asarray(data).swapaxes(axis, 0)
                data = data[::-1, ...]
                data = data.swapaxes(0, axis)
                results.append(data)
            return np.asarray(results)
        else:
            return np.asarray(x)
    else:
        results = []
        for data in x:
            data = np.asarray(data).swapaxes(axis, 0)
            data = data[::-1, ...]
            data = data.swapaxes(0, axis)
            results.append(data)
        return np.asarray(results)
Flip the axes of multiple images together, such as flipping left and right or up and down, randomly or non-randomly. Parameters ----------- x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). others : args See ``tl.prepro.flip_axis``. Returns ------- numpy.array A list of processed images.
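A usage sketch, assuming flip_axis_multi above is in scope: flipping a batch of two toy [row, col, channel] "images" left-right (axis=1):

import numpy as np

batch = [np.arange(12).reshape(2, 3, 2) for _ in range(2)]   # two tiny 2x3x2 images
flipped = flip_axis_multi(batch, axis=1, is_random=False)
print(flipped.shape)        # (2, 2, 3, 2)
print(flipped[0][0, :, 0])  # columns reversed: [4 2 0]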
def set_file_license_comment(self, doc, text):
    if self.has_package(doc) and self.has_file(doc):
        if not self.file_license_comment_set:
            self.file_license_comment_set = True
            self.file(doc).license_comment = text
            return True
        else:
            raise CardinalityError('File::LicenseComment')
    else:
        raise OrderError('File::LicenseComment')
Raises OrderError if no package or file defined. Raises CardinalityError if more than one per file.
def convert_matmul(params, w_name, scope_name, inputs, layers, weights, names):
    print('Converting matmul ...')

    if names == 'short':
        tf_name = 'MMUL' + random_string(4)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    if len(inputs) == 1:
        weights_name = '{0}.weight'.format(w_name)
        W = weights[weights_name].numpy().transpose()
        input_channels, output_channels = W.shape
        keras_weights = [W]
        dense = keras.layers.Dense(
            output_channels,
            weights=keras_weights,
            use_bias=False,
            name=tf_name,
            bias_initializer='zeros',
            kernel_initializer='zeros',
        )
        layers[scope_name] = dense(layers[inputs[0]])
    elif len(inputs) == 2:
        weights_name = '{0}.weight'.format(w_name)
        W = weights[weights_name].numpy().transpose()
        input_channels, output_channels = W.shape
        keras_weights = [W]
        dense = keras.layers.Dense(
            output_channels,
            weights=keras_weights,
            use_bias=False,
            name=tf_name,
            bias_initializer='zeros',
            kernel_initializer='zeros',
        )
        layers[scope_name] = dense(layers[inputs[0]])
    else:
        raise AssertionError('Cannot convert matmul layer')
Convert matmul layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers