def save_to(self, obj):
    if isinstance(obj, dict):
        obj = dict(obj)
    for key in self.changed_fields:
        if key in self.cleaned_data:
            val = self.cleaned_data.get(key)
            set_obj_value(obj, key, val)
    return obj
Save the cleaned data to an object.
def execute_script(self, name, keys, *args, **options):
    script = get_script(name)
    if not script:
        raise redis.RedisError('No such script "%s"' % name)
    address = self.address()
    if address not in all_loaded_scripts:
        all_loaded_scripts[address] = set()
    loaded = all_loaded_scripts[address]
    toload = script.required_scripts.difference(loaded)
    for name in toload:
        s = get_script(name)
        yield self.script_load(s.script)
    loaded.update(toload)
    yield script(self, keys, args, options)

Execute a script, making sure all required scripts are loaded first.
def register_channel_post_handler(self, callback, *custom_filters, commands=None,
                                  regexp=None, content_types=None, state=None,
                                  run_task=None, **kwargs):
    filters_set = self.filters_factory.resolve(self.channel_post_handlers,
                                               *custom_filters,
                                               commands=commands,
                                               regexp=regexp,
                                               content_types=content_types,
                                               state=state,
                                               **kwargs)
    self.channel_post_handlers.register(self._wrap_async_task(callback, run_task),
                                        filters_set)

Register a handler for channel posts.

:param callback:
:param commands: list of commands
:param regexp: regular expression
:param content_types: list of content types
:param state:
:param custom_filters: list of custom filters
:param run_task: run the callback in a task (don't wait for results)
:param kwargs:
:return: decorated function
def disable_switchport(self, inter_type, inter):
    config = ET.Element('config')
    interface = ET.SubElement(config, 'interface',
                              xmlns="urn:brocade.com:mgmt:brocade-interface")
    int_type = ET.SubElement(interface, inter_type)
    name = ET.SubElement(int_type, 'name')
    name.text = inter
    ET.SubElement(int_type, 'switchport-basic', operation='delete')
    try:
        self._callback(config)
        return True
    except Exception as error:
        logging.error(error)
        return False

Change an interface's operation to L3.

Args:
    inter_type: The type of interface you want to configure.
        Ex. tengigabitethernet, gigabitethernet, fortygigabitethernet.
    inter: The ID for the interface you want to configure. Ex. 1/0/1

Returns:
    True if command completes successfully or False if not.

Raises:
    None
def write_intro(self):
    self.comment(_("created by %(app)s at %(time)s") %
                 {"app": configuration.AppName,
                  "time": strformat.strtime(self.starttime)})
    self.comment(_("Get the newest version at %(url)s") %
                 {'url': configuration.Url})
    self.comment(_("Write comments and bugs to %(url)s") %
                 {'url': configuration.SupportUrl})
    self.comment(_("Support this project at %(url)s") %
                 {'url': configuration.DonateUrl})
    self.check_date()
Write intro comments.
def get_field_info(self, field):
    field_info = self.get_attributes(field)
    field_info["required"] = getattr(field, "required", False)
    field_info["type"] = self.get_label_lookup(field)
    if getattr(field, "child", None):
        field_info["child"] = self.get_field_info(field.child)
    elif getattr(field, "fields", None):
        field_info["children"] = self.get_serializer_info(field)
    if (not isinstance(field, (serializers.RelatedField,
                               serializers.ManyRelatedField))
            and hasattr(field, "choices")):
        field_info["choices"] = [
            {
                "value": choice_value,
                "display_name": force_text(choice_name, strings_only=True)
            }
            for choice_value, choice_name in field.choices.items()
        ]
    return field_info

This method basically mirrors the one from rest_framework==3.3.3. We are currently pinned to rest_framework==3.1.1; if we upgrade, this can be refactored and simplified to rely more heavily on rest_framework's built-in logic.
def detect_traits(item):
    return traits.detect_traits(
        name=item.name,
        alias=item.alias,
        filetype=(list(item.fetch("kind_51")) or [None]).pop(),
    )

Build a traits list from attributes of the passed item. Currently, "kind_51", "name" and "alias" are considered. See pyrocore.util.traits:detect_traits for more details.
def partition(pred, iterable):
    trues = []
    falses = []
    for item in iterable:
        if pred(item):
            trues.append(item)
        else:
            falses.append(item)
    return trues, falses

Split the items of an iterable into two lists based on a predicate: items for which the predicate is true, and the rest.
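A quick usage sketch for partition, splitting integers by parity:

# Usage sketch: pred(item) is True for evens, so they land in the first list.
evens, odds = partition(lambda n: n % 2 == 0, range(10))
assert evens == [0, 2, 4, 6, 8]
assert odds == [1, 3, 5, 7, 9]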
def _lint():
    project_python_files = [filename for filename in get_project_files()
                            if filename.endswith(b'.py')]
    retcode = subprocess.call(
        ['flake8', '--max-complexity=10'] + project_python_files)
    if retcode == 0:
        print_success_message('No style errors')
    return retcode
Run lint and return an exit code.
def map_sections(fun, neurites, neurite_type=NeuriteType.all,
                 iterator_type=Tree.ipreorder):
    return map(fun, iter_sections(neurites,
                                  iterator_type=iterator_type,
                                  neurite_filter=is_type(neurite_type)))
Map `fun` to all the sections in a collection of neurites
def bestfit_func(self, bestfit_x):
    # Guard: the original checked `self.bestfit_func`, which is this bound
    # method and always truthy; checking for the fit_args attribute that
    # do_bestfit populates makes the intended error actually fire.
    if not hasattr(self, "fit_args"):
        raise KeyError("Do do_bestfit first")
    return self.args["func"](self.fit_args, bestfit_x)

Return the y value of the best-fit function evaluated at ``bestfit_x``.
def ekifld(handle, tabnam, ncols, nrows, cnmlen, cnames, declen, decls):
    handle = ctypes.c_int(handle)
    tabnam = stypes.stringToCharP(tabnam)
    ncols = ctypes.c_int(ncols)
    nrows = ctypes.c_int(nrows)
    cnmlen = ctypes.c_int(cnmlen)
    cnames = stypes.listToCharArray(cnames)
    declen = ctypes.c_int(declen)
    recptrs = stypes.emptyIntVector(nrows)
    decls = stypes.listToCharArray(decls)
    segno = ctypes.c_int()
    libspice.ekifld_c(handle, tabnam, ncols, nrows, cnmlen, cnames, declen,
                      decls, ctypes.byref(segno), recptrs)
    return segno.value, stypes.cVectorToPython(recptrs)

Initialize a new E-kernel segment to allow fast writing.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekifld_c.html

:param handle: File handle.
:type handle: int
:param tabnam: Table name.
:type tabnam: str
:param ncols: Number of columns in the segment.
:type ncols: int
:param nrows: Number of rows in the segment.
:type nrows: int
:param cnmlen: Length of names in the column name array.
:type cnmlen: int
:param cnames: Names of columns.
:type cnames: list of str
:param declen: Length of declaration strings in the declaration array.
:type declen: int
:param decls: Declarations of columns.
:type decls: list of str
:return: Segment number, array of record pointers.
:rtype: tuple
def get_grid_data(xall, yall, zall, nbins=100, method='nearest'):
    from scipy.interpolate import griddata
    x, y = _np.meshgrid(
        _np.linspace(xall.min(), xall.max(), nbins),
        _np.linspace(yall.min(), yall.max(), nbins),
        indexing='ij')
    z = griddata(
        _np.hstack([xall[:, None], yall[:, None]]),
        zall, (x, y), method=method)
    return x, y, z

Interpolate unstructured two-dimensional data.

Parameters
----------
xall : ndarray(T)
    Sample x-coordinates.
yall : ndarray(T)
    Sample y-coordinates.
zall : ndarray(T)
    Sample z-coordinates.
nbins : int, optional, default=100
    Number of histogram bins used in x/y-dimensions.
method : str, optional, default='nearest'
    Assignment method; scipy.interpolate.griddata supports the methods
    'nearest', 'linear', and 'cubic'.

Returns
-------
x : ndarray(nbins, nbins)
    The bins' x-coordinates in meshgrid format.
y : ndarray(nbins, nbins)
    The bins' y-coordinates in meshgrid format.
z : ndarray(nbins, nbins)
    Interpolated z-data in meshgrid format.
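A minimal usage sketch, assuming numpy and scipy are installed (the sample data here is made up):

# Interpolate scattered samples of z = x * y onto a regular 20x20 grid.
import numpy as np
rng = np.random.default_rng(42)
xall = rng.uniform(0, 1, 500)
yall = rng.uniform(0, 1, 500)
zall = xall * yall
x, y, z = get_grid_data(xall, yall, zall, nbins=20, method='linear')
print(x.shape, y.shape, z.shape)  # (20, 20) for all three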
def _onEncoding(self, encString, line, pos, absPosition):
    self.encoding = Encoding(encString, line, pos, absPosition)
Memorizes module encoding
def wait_for_keys(self, *keys, timeout=0):
    if len(keys) == 1 and _is_iterable(keys[0]):
        keys = keys[0]
    return self.listen_until_return(Handler.key_press(keys), timeout=timeout)

Wait until one of the specified keys is pressed, and return which key it was.

:param keys: iterable of integer pygame keycodes, or simply multiple keys
    passed as separate arguments
:type keys: iterable
:param timeout: number of seconds to wait until the function returns
:type timeout: float
:returns: the keycode of the pressed key, or None in case of timeout
:rtype: int
def tearpage_backend(filename, teared_pages=None):
    if teared_pages is None:
        teared_pages = [0]

    with tempfile.NamedTemporaryFile() as tmp:
        shutil.copy(filename, tmp.name)
        try:
            input_file = PdfFileReader(open(tmp.name, 'rb'))
        except PdfReadError:
            fix_pdf(filename, tmp.name)
            input_file = PdfFileReader(open(tmp.name, 'rb'))
        num_pages = input_file.getNumPages()
        output_file = PdfFileWriter()
        for i in range(num_pages):
            if i in teared_pages:
                continue
            output_file.addPage(input_file.getPage(i))
        tmp.close()
        outputStream = open(filename, "wb")
        output_file.write(outputStream)

Copy ``filename`` to a tempfile, then write all pages back to ``filename`` except the torn ones.

.. note::
    Adapted from sciunto's code, https://github.com/sciunto/tear-pages

:param filename: PDF filepath
:param teared_pages: Numbers of the pages to tear out. Defaults to the
    first page only.
def is_canonical(version, loosedev=False):
    if loosedev:
        return loose440re.match(version) is not None
    return pep440re.match(version) is not None

Return whether or not the version string is canonical according to PEP 440.
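The pep440re pattern itself is defined elsewhere in the module; as a hypothetical stand-in, PEP 440's appendix gives a canonical-form regex along these lines:

import re

# Hypothetical stand-in for the module's pep440re, adapted from the
# canonical-version regex in PEP 440's appendix.
pep440re = re.compile(
    r'^([1-9][0-9]*!)?(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))*'
    r'((a|b|rc)(0|[1-9][0-9]*))?(\.post(0|[1-9][0-9]*))?'
    r'(\.dev(0|[1-9][0-9]*))?$')

print(is_canonical('1.2.3'))          # True
print(is_canonical('1.2.3.invalid'))  # False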
def __create_preview_object_base(self, dct):
    if dct.get("_id"):
        del dct["_id"]
    preview_object_id = yield self.previews.insert(dct)
    raise Return(preview_object_id)

The starting point for a preview of a future object. This is the object to which future revisions will be applied, one by one.

:param dict dct: The starting object dictionary
:return: The preview object id
:rtype: str
def create_spooled_temporary_file(filepath=None, fileobj=None):
    spooled_file = tempfile.SpooledTemporaryFile(
        max_size=settings.TMP_FILE_MAX_SIZE,
        dir=settings.TMP_DIR)
    if filepath:
        fileobj = open(filepath, 'r+b')
    if fileobj is not None:
        fileobj.seek(0)
        copyfileobj(fileobj, spooled_file, settings.TMP_FILE_READ_SIZE)
    return spooled_file

Create a spooled temporary file. If ``filepath`` or ``fileobj`` is given, its content is copied into the temporary file.

:param filepath: Path of input file
:type filepath: str
:param fileobj: Input file object
:type fileobj: file
:returns: Spooled temporary file
:rtype: :class:`tempfile.SpooledTemporaryFile`
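For reference, the same pattern without the project's settings module, using hypothetical hard-coded sizes:

# Standalone sketch: spool up to 10 MiB in memory, copying in 1 MiB chunks.
import tempfile
from shutil import copyfileobj

with open('input.bin', 'rb') as src:   # assumes input.bin exists
    spooled = tempfile.SpooledTemporaryFile(max_size=10 * 1024 * 1024)
    src.seek(0)
    copyfileobj(src, spooled, 1024 * 1024)
    spooled.seek(0)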
def padDigitalData(self, dig_data, n):
    n = int(n)
    l0 = len(dig_data)
    if l0 % n == 0:
        return dig_data
    else:
        ladd = n - (l0 % n)
        dig_data_add = np.zeros(ladd, dtype="uint32")
        dig_data_add.fill(dig_data[-1])
        return np.concatenate((dig_data, dig_data_add))

Pad dig_data with its last element so that the length of the new array is a multiple of n.
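A quick sketch of the padding behavior, with obj a hypothetical instance of the enclosing class:

# Pad a 5-element array out to the next multiple of 4 using its last value.
import numpy as np
data = np.array([1, 2, 3, 7, 7], dtype="uint32")
padded = obj.padDigitalData(data, 4)
print(padded)  # [1 2 3 7 7 7 7 7]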
def upvotes(self, option):
    params = join_params(self.parameters, {"upvotes": option})
    return self.__class__(**params)
Set whether to filter by a user's upvoted list. Options available are user.ONLY, user.NOT, and None; default is None.
def where(self, column_or_label, value_or_predicate=None, other=None):
    column = self._get_column(column_or_label)
    if other is not None:
        assert callable(value_or_predicate), "Predicate required for 3-arg where"
        predicate = value_or_predicate
        other = self._get_column(other)
        column = [predicate(y)(x) for x, y in zip(column, other)]
    elif value_or_predicate is not None:
        if not callable(value_or_predicate):
            predicate = _predicates.are.equal_to(value_or_predicate)
        else:
            predicate = value_or_predicate
        column = [predicate(x) for x in column]
    return self.take(np.nonzero(column)[0])

Return a new ``Table`` containing rows where ``value_or_predicate`` returns True for values in ``column_or_label``.

Args:
    ``column_or_label``: A column of the ``Table`` either as a label
        (``str``) or an index (``int``). Can also be an array of booleans;
        only the rows where the array value is ``True`` are kept.

    ``value_or_predicate``: If a function, it is applied to every value in
        ``column_or_label``. Only the rows where ``value_or_predicate``
        returns True are kept. If a single value, only the rows where the
        values in ``column_or_label`` are equal to ``value_or_predicate``
        are kept.

    ``other``: Optional additional column label for ``value_or_predicate``
        to make pairwise comparisons. See the examples below for usage.
        When ``other`` is supplied, ``value_or_predicate`` must be a
        callable function.

Returns:
    If ``value_or_predicate`` is a function, returns a new ``Table``
    containing only the rows where ``value_or_predicate(val)`` is True for
    the ``val``s in ``column_or_label``.

    If ``value_or_predicate`` is a value, returns a new ``Table``
    containing only the rows where the values in ``column_or_label`` are
    equal to ``value_or_predicate``.

    If ``column_or_label`` is an array of booleans, returns a new
    ``Table`` containing only the rows where ``column_or_label`` is
    ``True``.

>>> marbles = Table().with_columns(
...    "Color", make_array("Red", "Green", "Blue",
...                        "Red", "Green", "Green"),
...    "Shape", make_array("Round", "Rectangular", "Rectangular",
...                        "Round", "Rectangular", "Round"),
...    "Amount", make_array(4, 6, 12, 7, 9, 2),
...    "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))
>>> marbles
Color | Shape       | Amount | Price
Red   | Round       | 4      | 1.3
Green | Rectangular | 6      | 1.2
Blue  | Rectangular | 12     | 2
Red   | Round       | 7      | 1.75
Green | Rectangular | 9      | 0
Green | Round       | 2      | 3

Use a value to select matching rows

>>> marbles.where("Price", 1.3)
Color | Shape | Amount | Price
Red   | Round | 4      | 1.3

In general, a higher order predicate function such as the functions in ``datascience.predicates.are`` can be used.

>>> from datascience.predicates import are
>>> # equivalent to previous example
>>> marbles.where("Price", are.equal_to(1.3))
Color | Shape | Amount | Price
Red   | Round | 4      | 1.3

>>> marbles.where("Price", are.above(1.5))
Color | Shape       | Amount | Price
Blue  | Rectangular | 12     | 2
Red   | Round       | 7      | 1.75
Green | Round       | 2      | 3

Use the optional argument ``other`` to apply predicates to compare columns.

>>> marbles.where("Price", are.above, "Amount")
Color | Shape | Amount | Price
Green | Round | 2      | 3

>>> marbles.where("Price", are.equal_to, "Amount") # empty table
Color | Shape | Amount | Price
def every(predicate, *iterables):
    try:
        if len(iterables) == 1:
            ifilterfalse(predicate, iterables[0]).next()
        else:
            ifilterfalse(bool, starmap(predicate, izip(*iterables))).next()
    except StopIteration:
        return True
    else:
        return False

Like `some`, but only returns `True` if all the elements of `iterables` satisfy `predicate`.

Examples:

>>> every(bool, [])
True
>>> every(bool, [0])
False
>>> every(bool, [1,1])
True
>>> every(operator.eq, [1,2,3],[1,2])
True
>>> every(operator.eq, [1,2,3],[0,2])
False
def for_category(self, category, live_only=False):
    filters = {'tag': category.tag}
    if live_only:
        filters.update({'entry__live': True})
    return self.filter(**filters)

Returns a queryset of EntryTag instances for the specified category.

:param category: the Category instance.
:param live_only: flag to include only "live" entries.
:rtype: django.db.models.query.QuerySet
def _imagpart(self, f):
    def f_im(x, **kwargs):
        result = np.asarray(f(x, **kwargs), dtype=self.scalar_out_dtype)
        return result.imag

    if is_real_dtype(self.out_dtype):
        return self.zero()
    else:
        return self.real_space.element(f_im)
Function returning the imaginary part of the result from ``f``.
def get_top_artists(self, limit=None, cacheable=True):
    params = {}
    if limit:
        params["limit"] = limit
    doc = _Request(self, "chart.getTopArtists", params).execute(cacheable)
    return _extract_top_artists(doc, self)
Returns the most played artists as a sequence of TopItem objects.
def make_directory_if_not_exists(path):
    try:
        os.makedirs(path)
    except OSError as error:
        if error.errno != errno.EEXIST:
            raise error

Create the specified path, making all intermediate-level directories needed to contain the leaf directory. Ignore any error that would occur if the leaf directory already exists.

@note: all the intermediate-level directories are created with the
    default mode, 0777 (octal).

@param path: the path to create.

@raise OSError: an error that would occur if the path cannot be created.
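On Python 3.2+, the same behavior is available directly from the standard library:

import os

# Equivalent one-liner: no error if the leaf directory already exists.
os.makedirs('/tmp/a/b/c', exist_ok=True)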
def bulk_record_workunits(self, engine_workunits):
    for workunit in engine_workunits:
        duration = workunit['end_timestamp'] - workunit['start_timestamp']
        span = zipkin_span(
            service_name="pants",
            span_name=workunit['name'],
            duration=duration,
            span_storage=self.span_storage,
        )
        span.zipkin_attrs = ZipkinAttrs(
            trace_id=self.trace_id,
            span_id=workunit['span_id'],
            parent_span_id=workunit.get("parent_id", self.parent_id),
            flags='0',
            is_sampled=True,
        )
        span.start()
        span.start_timestamp = workunit['start_timestamp']
        span.stop()

Record a collection of workunits coming from the v2 engine.
def build_single(mode):
    if mode == 'force':
        amode = ['-a']
    else:
        amode = []
    if executable.endswith('uwsgi'):
        _executable = executable[:-5] + 'python'
    else:
        _executable = executable
    p = subprocess.Popen([_executable, '-m', 'nikola', 'build'] + amode,
                         stderr=subprocess.PIPE)
    p.wait()
    rl = p.stderr.readlines()
    try:
        out = ''.join(rl)
    except TypeError:
        out = ''.join(l.decode('utf-8') for l in rl)
    return (p.returncode == 0), out
Build, in the single-user mode.
def get_assessments_offered_by_search(self, assessment_offered_query,
                                      assessment_offered_search):
    if not self._can('search'):
        raise PermissionDenied()
    return self._provider_session.get_assessments_offered_by_search(
        assessment_offered_query, assessment_offered_search)
Pass through to provider AssessmentOfferedSearchSession.get_assessments_offered_by_search
def ToJSonResponse(self, columns_order=None, order_by=(), req_id=0,
                   response_handler="google.visualization.Query.setResponse"):
    response_obj = {
        "version": "0.6",
        "reqId": str(req_id),
        "table": self._ToJSonObj(columns_order, order_by),
        "status": "ok"
    }
    encoded_response_str = DataTableJSONEncoder().encode(response_obj)
    if not isinstance(encoded_response_str, str):
        encoded_response_str = encoded_response_str.encode("utf-8")
    return "%s(%s);" % (response_handler, encoded_response_str)

Writes a table as a JSON response that can be returned as-is to a client.

This method writes a JSON response to return to a client in response to a Google Visualization API query. This string can be processed by the calling page, and is used to deliver a data table to a visualization hosted on a different page.

Args:
    columns_order: Optional. Passed straight to self.ToJSon().
    order_by: Optional. Passed straight to self.ToJSon().
    req_id: Optional. The response id, as retrieved by the request.
    response_handler: Optional. The response handler, as retrieved by
        the request.

Returns:
    A JSON response string to be received by the JS visualization Query
    object. This response would be translated into a DataTable on the
    client side. Example result (newlines added for readability):

    google.visualization.Query.setResponse({
        'version':'0.6', 'reqId':'0', 'status':'OK',
        'table': {cols: [...], rows: [...]}});

Note: The URL returning this string can be used as a data source by Google Visualization Gadgets or from JS code.
def to_one_hot(dataY):
    nc = 1 + np.max(dataY)
    onehot = [np.zeros(nc, dtype=np.int8) for _ in dataY]
    for i, j in enumerate(dataY):
        onehot[i][j] = 1
    return onehot

Convert the vector of labels dataY into one-hot encoding.

:param dataY: vector of labels
:return: one-hot encoded labels
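A usage sketch with a tiny label vector:

# Labels 0..2 become 3-element indicator vectors.
import numpy as np
labels = np.array([0, 2, 1])
print(to_one_hot(labels))
# [array([1, 0, 0], dtype=int8), array([0, 0, 1], dtype=int8),
#  array([0, 1, 0], dtype=int8)]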
def execfile(fname, variables):
    with open(fname) as f:
        code = compile(f.read(), fname, 'exec')
        exec(code, variables)

This is a builtin in Python 2, but we have to roll our own on Python 3.
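A usage sketch, assuming a hypothetical settings.py that defines DEBUG:

# Execute the file and read a value it defines out of the namespace.
namespace = {}
execfile('settings.py', namespace)
print(namespace.get('DEBUG'))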
def SetEventTag(self, event_tag):
    event_identifier = event_tag.GetEventIdentifier()
    lookup_key = event_identifier.CopyToString()
    self._index[lookup_key] = event_tag.GetIdentifier()

Sets an event tag in the index.

Args:
    event_tag (EventTag): event tag.
def inherited_labels(cls):
    return [scls.__label__ for scls in cls.mro()
            if hasattr(scls, '__label__')
            and not hasattr(scls, '__abstract_node__')]

Return a list of labels from the node class hierarchy.

:return: list
def translate(self):
    value = super().translate()
    if value is None or (isinstance(value, str) and value.strip() == ''):
        return None
    return int(value)

Gets the value in the current language, or in the configured fallback language.
def get_absl_log_prefix(record):
    created_tuple = time.localtime(record.created)
    created_microsecond = int(record.created % 1.0 * 1e6)

    critical_prefix = ''
    level = record.levelno
    if _is_non_absl_fatal_record(record):
        level = logging.ERROR
        critical_prefix = _CRITICAL_PREFIX
    severity = converter.get_initial_for_level(level)

    return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
        severity,
        created_tuple.tm_mon,
        created_tuple.tm_mday,
        created_tuple.tm_hour,
        created_tuple.tm_min,
        created_tuple.tm_sec,
        created_microsecond,
        _get_thread_id(),
        record.filename,
        record.lineno,
        critical_prefix)

Returns the absl log prefix for the log record.

Args:
    record: logging.LogRecord, the record to get the prefix for.
def userinfo_json(request, user_id):
    data = {'first_name': '', 'last_name': '', 'email': '', 'slug': '',
            'bio': '', 'phone': '', 'is_active': False}
    try:
        member = StaffMember.objects.get(pk=user_id)
        for key in data.keys():
            if hasattr(member, key):
                data[key] = getattr(member, key, '')
    except StaffMember.DoesNotExist:
        pass
    return HttpResponse(json.dumps(data), mimetype='application/json')

Return the user's information as a JSON object.
def option(self, *args, **kwargs):
    args, kwargs = _config_parameter(args, kwargs)
    return self._click.option(*args, **kwargs)

Registers a click.option which falls back to a configmanager Item if the user hasn't provided a value on the command line. The Item must be the last of ``args``.

Examples::

    config = Config({'greeting': 'Hello'})

    @click.command()
    @config.click.option('--greeting', config.greeting)
    def say_hello(greeting):
        click.echo(greeting)
def white_noise(dur=None, low=-1., high=1.):
    if dur is None or (isinf(dur) and dur > 0):
        while True:
            yield random.uniform(low, high)
    for x in xrange(rint(dur)):
        yield random.uniform(low, high)

White noise stream generator.

Parameters
----------
dur :
    Duration, in number of samples; endless if not given (or None).
low, high :
    Lower and higher limits. Defaults to the [-1; 1] range.

Returns
-------
Stream yielding random numbers between ``low`` and ``high``.
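A quick usage sketch (the original uses xrange, so this assumes Python 2 or an xrange = range alias):

# Take five noise samples from the generator.
samples = list(white_noise(dur=5))
assert len(samples) == 5
assert all(-1.0 <= s <= 1.0 for s in samples)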
def rmse(targets, predictions):
    _supervised_evaluation_error_checking(targets, predictions)
    return _turicreate.extensions._supervised_streaming_evaluator(
        targets, predictions, "rmse", {})

Compute the root mean squared error between two SArrays.

Parameters
----------
targets : SArray[float or int]
    An SArray of ground truth target values.

predictions : SArray[float or int]
    The prediction that corresponds to each target value. This vector must
    have the same length as ``targets``.

Returns
-------
out : float
    The RMSE between the two SArrays.

See Also
--------
max_error

Notes
-----
The root mean squared error between two vectors, x and y, is defined as:

.. math::

    RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - y_i)^2}

References
----------
- `Wikipedia - root-mean-square deviation
  <http://en.wikipedia.org/wiki/Root-mean-square_deviation>`_

Examples
--------
>>> targets = turicreate.SArray([3.14, 0.1, 50, -2.5])
>>> predictions = turicreate.SArray([3.1, 0.5, 50.3, -5])
>>> turicreate.evaluation.rmse(targets, predictions)
1.2749117616525465
def verify_edge_segments(edge_infos):
    if edge_infos is None:
        return

    for edge_info in edge_infos:
        num_segments = len(edge_info)
        for index in six.moves.xrange(-1, num_segments - 1):
            index1, start1, end1 = edge_info[index]
            if not 0.0 <= start1 < end1 <= 1.0:
                raise ValueError(BAD_SEGMENT_PARAMS, edge_info[index])
            index2, _, _ = edge_info[index + 1]
            if index1 == index2:
                raise ValueError(
                    SEGMENTS_SAME_EDGE, edge_info[index], edge_info[index + 1]
                )

Verify that the edge segments in an intersection are valid.

.. note::
    This is a helper used only by :func:`generic_intersect`.

Args:
    edge_infos (Optional[list]): List of "edge info" lists. Each list
        represents a curved polygon and contains 3-tuples of edge index,
        start and end (see the output of :func:`ends_to_curve`).

Raises:
    ValueError: If two consecutive edge segments lie on the same edge
        index.
    ValueError: If the start and end parameters are "invalid" (they should
        be between 0 and 1 and start should be strictly less than end).
def run(self, func=None):
    args = self.parser.parse_args()
    if self.__add_vq is not None and self.__config_logging:
        self.__config_logging(args)
    if self.__show_version_func and args.version and callable(self.__show_version_func):
        self.__show_version_func(self, args)
    elif args.func is not None:
        args.func(self, args)
    elif func is not None:
        func(self, args)
    else:
        self.parser.print_help()
Run the app
def dataReceived(self, data):
    self.bytes_in += len(data)
    self.buffer_in = self.buffer_in + data
    while self.CheckDataReceived():
        pass
Called from Twisted whenever data is received.
def has_mixture_channel(val: Any) -> bool:
    mixture_getter = getattr(val, '_has_mixture_', None)
    result = NotImplemented if mixture_getter is None else mixture_getter()
    if result is not NotImplemented:
        return result

    result = has_unitary(val)
    if result is not NotImplemented and result:
        return result

    return mixture_channel(val, None) is not None

Returns whether the value has a mixture channel representation.

In contrast to `has_mixture`, this method falls back to checking whether the value has a unitary representation via `has_unitary`.

Returns:
    If `val` has a `_has_mixture_` method and its result is not
    NotImplemented, that result is returned. Otherwise, if `val` has a
    `_has_unitary_` method and its result is not NotImplemented, that
    result is returned. Otherwise, if the value has a `_mixture_` method
    that returns a non-default value, True is returned. Returns False if
    none of these checks succeeds.
def get_compound_pd(self):
    entry1 = PDEntry(self.entry1.composition, 0)
    entry2 = PDEntry(self.entry2.composition, 0)
    cpd = CompoundPhaseDiagram(
        self.rxn_entries + [entry1, entry2],
        [Composition(entry1.composition.reduced_formula),
         Composition(entry2.composition.reduced_formula)],
        normalize_terminal_compositions=False)
    return cpd

Get the CompoundPhaseDiagram object, which can then be used for plotting.

Returns:
    (CompoundPhaseDiagram)
def subpnt(method, target, et, fixref, abcorr, obsrvr):
    method = stypes.stringToCharP(method)
    target = stypes.stringToCharP(target)
    et = ctypes.c_double(et)
    fixref = stypes.stringToCharP(fixref)
    abcorr = stypes.stringToCharP(abcorr)
    obsrvr = stypes.stringToCharP(obsrvr)
    spoint = stypes.emptyDoubleVector(3)
    trgepc = ctypes.c_double(0)
    srfvec = stypes.emptyDoubleVector(3)
    libspice.subpnt_c(method, target, et, fixref, abcorr, obsrvr, spoint,
                      ctypes.byref(trgepc), srfvec)
    return (stypes.cVectorToPython(spoint), trgepc.value,
            stypes.cVectorToPython(srfvec))

Compute the rectangular coordinates of the sub-observer point on a target body at a specified epoch, optionally corrected for light time and stellar aberration.

This routine supersedes :func:`subpt`.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/subpnt_c.html

:param method: Computation method.
:type method: str
:param target: Name of target body.
:type target: str
:param et: Epoch in ephemeris seconds past J2000 TDB.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:return: Sub-observer point on the target body,
    Sub-observer point epoch,
    Vector from observer to sub-observer point.
:rtype: tuple
def OnPrintPreview(self, event):
    print_area = self._get_print_area()
    print_data = self.main_window.print_data
    self.main_window.actions.print_preview(print_area, print_data)
Print preview handler
def probe_async(self, callback):
    topics = MQTTTopicValidator(self.prefix)
    self.client.publish(topics.probe, {'type': 'command',
                                       'operation': 'probe',
                                       'client': self.name})
    callback(self.id, True, None)

Probe for visible devices connected to this DeviceAdapter.

Args:
    callback (callable): A callback for when the probe operation has
        completed. callback should have signature
        callback(adapter_id, success, failure_reason) where:
            success: bool
            failure_reason: None if success is True, otherwise a reason
                for why we could not probe
def proxy(host='localhost', port=4304, flags=0, persistent=False,
          verbose=False):
    try:
        gai = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM,
                                 socket.IPPROTO_TCP)
    except socket.gaierror as err:
        raise ConnError(*err.args)

    assert gai
    for (family, _type, _proto, _, sockaddr) in gai:
        assert _type is socket.SOCK_STREAM and _proto is socket.IPPROTO_TCP
        owp = _Proxy(family, sockaddr, flags, verbose)
        try:
            owp.ping()
        except ConnError as err:
            lasterr = err.args
            continue
        else:
            break
    else:
        raise ConnError(*lasterr)

    owp._init_errcodes()
    if persistent:
        owp = clone(owp, persistent=True)
    assert not isinstance(owp, _PersistentProxy) or owp.conn is None
    return owp

Factory function that returns a proxy object for an owserver at ``host``, ``port``.
def parse(cls, uri):
    uri_components = urlsplit(uri)
    # Keep a component only if it is non-None and, for strings, non-empty.
    adapter_fn = lambda x: x if x is not None and (
        isinstance(x, str) is False or len(x) > 0) else None
    return cls(
        scheme=adapter_fn(uri_components.scheme),
        username=adapter_fn(uri_components.username),
        password=adapter_fn(uri_components.password),
        hostname=adapter_fn(uri_components.hostname),
        port=adapter_fn(uri_components.port),
        path=adapter_fn(uri_components.path),
        query=adapter_fn(uri_components.query),
        fragment=adapter_fn(uri_components.fragment),
    )

Parse a URI string and return a WURI object.

:param uri: string to parse
:return: WURI
def _allow_custom_expire(self, load):
    expire_override = self.opts.get('token_expire_user_override', False)

    if expire_override is True:
        return True

    if isinstance(expire_override, collections.Mapping):
        expire_whitelist = expire_override.get(load['eauth'], [])
        if isinstance(expire_whitelist, collections.Iterable):
            if load.get('username') in expire_whitelist:
                return True

    return False

Return a bool indicating whether the requesting user is allowed to set a custom token expiration.
def plural(self, text, count=None):
    pre, word, post = self.partition_word(text)
    if not word:
        return text
    plural = self.postprocess(
        word,
        self._pl_special_adjective(word, count)
        or self._pl_special_verb(word, count)
        or self._plnoun(word, count),
    )
    return "{}{}{}".format(pre, plural, post)

Return the plural of text.

If count supplied, then return text if count is one of:
    1, a, an, one, each, every, this, that

otherwise return the plural.

Whitespace at the start and end is preserved.
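This method matches the engine from the inflect package; assuming that package, typical usage looks like:

# Usage sketch with the inflect package.
import inflect
p = inflect.engine()
print(p.plural("cat"))     # 'cats'
print(p.plural("cat", 1))  # 'cat' -- a count of one keeps the singular
print(p.plural("child"))   # 'children'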
def get(self, *args, **kwargs):
    if not mqueue.qsize():
        return None
    message_data, content_type, content_encoding = mqueue.get()
    return self.Message(backend=self,
                        body=message_data,
                        content_type=content_type,
                        content_encoding=content_encoding)

Get the next waiting message from the queue.

:returns: A :class:`Message` instance, or ``None`` if there are no
    messages waiting.
def find(name):
    if op.exists(name):
        return name

    path = op.dirname(__file__) or '.'
    paths = [path] + config['include_path']

    for path in paths:
        filename = op.abspath(op.join(path, name))
        if op.exists(filename):
            return filename
        for d in os.listdir(path):
            fullpath = op.abspath(op.join(path, d))
            if op.isdir(fullpath):
                filename = op.abspath(op.join(fullpath, name))
                if op.exists(filename):
                    return filename

    return None

Locate a file in the shader library.
def ref(self, orm_classpath, cls_pk=None):
    orm_module, orm_class = get_objects(orm_classpath)
    q = orm_class.query
    if cls_pk:
        found = False
        for fn, f in orm_class.schema.fields.items():
            cls_ref_s = f.schema
            if cls_ref_s and self.schema == cls_ref_s:
                q.is_field(fn, cls_pk)
                found = True
                break

        if not found:
            raise ValueError("Did not find a foreign key field for [{}] in [{}]".format(
                self.orm_class.table_name,
                orm_class.table_name,
            ))

    return q

Takes a classpath to allow querying from another Orm class.

The reason it takes string paths is to avoid infinite-recursion import problems: an orm class from module A might have a ref from module B, and sometimes it is handy for module B to fetch the objects from module A that correspond to an object in module B, but you can't import module A into module B because that would create a circular import.

orm_classpath -- string -- a full python class path (eg, foo.bar.Che)
cls_pk -- mixed -- automatically set the where field of orm_classpath
    that references self.orm_class to the value in cls_pk if present
return -- Query()
def get_planet(planet_id):
    result = _get(planet_id, settings.PLANETS)
    return Planet(result.content)
Return a single planet
def close(self):
    try:
        self.dut.close()
    except Exception:
        logging.warning('Closing DUT was not successful')
    else:
        logging.debug('Closed DUT')

Release hardware resources.
def set(self, name, value, overwrite=False):
    if hasattr(self, name):
        if overwrite:
            setattr(self, name, value)
        else:
            self._log.warning("Configuration parameter %s exists and overwrite not allowed" % name)
            raise Exception("Configuration parameter %s exists and overwrite not allowed" % name)
    else:
        setattr(self, name, value)
    return getattr(self, name)

Sets a new value for a given configuration parameter.

If the parameter already exists, an Exception is thrown unless overwrite is set to True.

:param name: Unique name of the parameter
:param value: Value of the configuration parameter
:param overwrite: If true, an existing parameter of *name* gets
    overwritten without warning or exception.
:type overwrite: boolean
def output(self, value, normal=False, color=None, error=False, arrow=False,
           indent=None):
    if error and value and (normal or self.verbose):
        return self._print(value, color='red', indent=indent)
    if self.verbose or normal:
        return self._print(value, color, arrow, indent)
    return

Handles the verbosity of print calls: the value is printed if ``normal`` is set, or if this instance's verbose flag is True.

:param value: a string representing the message to be printed
:type value: String
:param normal: if set to true the message is always printed, otherwise it
    is only shown if verbosity is set
:type normal: boolean
:param color: The color of the message, choices: 'red', 'green', 'blue'
:type color: String
:param error: if set to true the message appears in red
:type error: Boolean
:param arrow: if set to true an arrow appears before the message
:type arrow: Boolean
:param indent: indents the message based on the number provided
:type indent: int
:returns: void
def _set_status_data(self, userdata):
    self._on_mask = userdata['d3']
    self._off_mask = userdata['d4']
    self._x10_house_code = userdata['d5']
    self._x10_unit = userdata['d6']
    self._ramp_rate = userdata['d7']
    self._on_level = userdata['d8']
    self._led_brightness = userdata['d9']
    self._non_toggle_mask = userdata['d10']
    self._led_bit_mask = userdata['d11']
    self._x10_all_bit_mask = userdata['d12']
    self._on_off_bit_mask = userdata['d13']
    self._trigger_group_bit_mask = userdata['d14']

Set status properties from userdata response.

Response values:
    d3:  On Mask
    d4:  Off Mask
    d5:  X10 House Code
    d6:  X10 Unit
    d7:  Ramp Rate
    d8:  On-Level
    d9:  LED Brightness
    d10: Non-Toggle Mask
    d11: LED Bit Mask
    d12: X10 ALL Bit Mask
    d13: On/Off Bit Mask
    d14: Trigger Group Bit Mask
def calculate_manual_reading(basic_data: BasicMeterData) -> Reading:
    t_start = basic_data.previous_register_read_datetime
    t_end = basic_data.current_register_read_datetime
    read_start = basic_data.previous_register_read
    read_end = basic_data.current_register_read
    value = basic_data.quantity
    uom = basic_data.uom
    quality_method = basic_data.current_quality_method
    return Reading(t_start, t_end, value, uom, quality_method, "", "",
                   read_start, read_end)
Calculate the interval between two manual readings
def list_rocs_files(url=ROCS_URL):
    soup = BeautifulSoup(get(url))
    if not url.endswith('/'):
        url += '/'
    files = []
    for elem in soup.findAll('a'):
        if elem['href'].startswith('?'):
            continue
        if elem.string.lower() == 'parent directory':
            continue
        files.append(url + elem['href'])
    return files

Lists the files linked from the given url.
def _git_diff(self):
    if self._diff_dict is None:
        result_dict = dict()

        for diff_str in self._get_included_diff_results():
            diff_dict = self._parse_diff_str(diff_str)

            for src_path in diff_dict.keys():
                if self._is_path_excluded(src_path):
                    continue

                root, extension = os.path.splitext(src_path)
                extension = extension[1:].lower()
                if not self._supported_extensions or extension in self._supported_extensions:
                    added_lines, deleted_lines = diff_dict[src_path]

                    result_dict[src_path] = [
                        line for line in result_dict.get(src_path, [])
                        if not line in deleted_lines
                    ] + added_lines

        for (src_path, lines) in result_dict.items():
            result_dict[src_path] = self._unique_ordered_lines(lines)

        self._diff_dict = result_dict

    return self._diff_dict

Run `git diff` and returns a dict in which the keys are changed file paths and the values are lists of line numbers.

Guarantees that each line number within a file is unique (no repeats) and in ascending order.

Returns a cached result if called multiple times.

Raises a GitDiffError if `git diff` has an error.
def environ_setenv(self, tag, data):
    environ = data.get('environ', None)
    if environ is None:
        return False
    false_unsets = data.get('false_unsets', False)
    clear_all = data.get('clear_all', False)
    import salt.modules.environ as mod_environ
    return mod_environ.setenv(environ, false_unsets, clear_all)
Set the salt-minion main process environment according to the data contained in the minion event data
def _create_event(self, event_state, event_type, event_value,
                  proc_list, proc_desc, peak_time):
    if event_state == "WARNING" or event_state == "CRITICAL":
        self.set_process_sort(event_type)

        item = [
            time.mktime(datetime.now().timetuple()),
            -1,
            event_state,
            event_type,
            event_value,
            event_value,
            event_value,
            event_value,
            1,
            [],
            proc_desc,
            glances_processes.sort_key]

        self.events_list.insert(0, item)

        if self.len() > self.events_max:
            self.events_list.pop()

        return True
    else:
        return False

Add a new item to the log list. The item is added only if the criticity (event_state) is WARNING or CRITICAL.
def patchURL(self, url, headers, body):
    return self._load_resource("PATCH", url, headers, body)
Request a URL using the HTTP method PATCH.
def predict_is(self, h):
    result = pd.DataFrame([self.run(h=h)[2]]).T
    result.index = self.index[-h:]
    return result

Outputs predictions for the Aggregate algorithm on the in-sample data.

Parameters
----------
h : int
    How many steps to run the aggregating algorithm on.

Returns
-------
- pd.DataFrame of ensemble predictions
def start_tasks(self):
    while self.tasks_at_once > len(self.pending_results) and self._has_more_tasks():
        task, parent_result = self.tasks.popleft()
        self.execute_task(task, parent_result)
Start however many tasks we can based on our limits and what we have left to finish.
def re_evaluate(local_dict=None):
    try:
        compiled_ex = _numexpr_last['ex']
    except KeyError:
        raise RuntimeError("no previous evaluate() execution found")
    argnames = _numexpr_last['argnames']
    args = getArguments(argnames, local_dict)
    kwargs = _numexpr_last['kwargs']
    with evaluate_lock:
        return compiled_ex(*args, **kwargs)

Re-evaluate the previously executed array expression without any check.

This is meant for accelerating loops that are re-evaluating the same expression repeatedly without changing anything else than the operands. If unsure, use evaluate() which is safer.

Parameters
----------
local_dict : dictionary, optional
    A dictionary that replaces the local operands in the current frame.
def thread_pool(*workers, results=None, end_of_queue=EndOfQueue):
    if results is None:
        results = Queue(end_of_queue=end_of_queue)

    count = thread_counter(results.close)

    @pull
    def thread_pool_results(source):
        for worker in workers:
            t = threading.Thread(
                target=count(patch),
                args=(pull(source) >> worker, results.sink),
                daemon=True)
            t.start()

        yield from results.source()

    return thread_pool_results

Returns a |pull| object, call it ``r``, starting a thread for each given worker. Each thread pulls from the source that ``r`` is connected to, and the returned results are pushed to a |Queue|. ``r`` yields from the other end of the same |Queue|.

The target function for each thread is |patch|, which can be stopped by exhausting the source. If all threads have ended, the result queue receives end-of-queue.

:param results: If results should go somewhere else than a newly
    constructed |Queue|, a different |Connection| object can be given.
:type results: |Connection|
:param end_of_queue: end-of-queue signal object passed on to the creation
    of the |Queue| object.
:rtype: |pull|
def get_value_as_list(self, dictionary, key):
    if key not in dictionary:
        return None
    value = dictionary[key]
    if not isinstance(value, list):
        return [value]
    else:
        return value

Helper function to check and convert a value to a list.

This helps generalize the RIBCL data across servers.

:param dictionary: a dictionary to check in if key is present.
:param key: key to be checked if it's present in the given dictionary.
:returns: the data converted to a list, or None if the key is absent.
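A quick sketch of the wrapping behavior, with obj a hypothetical instance:

# Single values are wrapped, lists pass through, missing keys give None.
data = {'fan': 'FAN1', 'temps': ['CPU', 'AMBIENT']}
print(obj.get_value_as_list(data, 'fan'))    # ['FAN1']
print(obj.get_value_as_list(data, 'temps'))  # ['CPU', 'AMBIENT']
print(obj.get_value_as_list(data, 'nope'))   # None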
def _get_future_tasks(self):
    self.alerts = {}
    now = std_now()
    for task in objectmodels['task'].find({'alert_time': {'$gt': now}}):
        self.alerts[task.alert_time] = task

    self.log('Found', len(self.alerts), 'future tasks')
Assemble a list of future alerts
def getAvg(self, varname, **kwds):
    if hasattr(self, varname):
        return np.mean(getattr(self, varname), **kwds)
    else:
        return np.nan

Calculates the average of an attribute of this instance. Returns NaN if no such attribute exists.

Parameters
----------
varname : string
    The name of the attribute whose average is to be calculated. This
    attribute must be an np.array or other class compatible with np.mean.

Returns
-------
avg : float or np.array
    The average of this attribute. Might be an array if the axis keyword
    is passed.
def parse_yaml(self, y):
    self._targets = []
    if 'targets' in y:
        for t in y['targets']:
            if 'waitTime' in t['condition']:
                new_target = WaitTime()
            elif 'preceding' in t['condition']:
                new_target = Preceding()
            else:
                new_target = Condition()
            new_target.parse_yaml(t)
            self._targets.append(new_target)
    return self

Parse a YAML specification of a message sending object into this object.
def create_prefix_dir(self, path, fmt):
    create_prefix_dir(self._get_os_path(path.strip('/')), fmt)
Create the prefix dir, if missing
def transform_cur_commands_interactive(_, **kwargs):
    event_payload = kwargs.get('event_payload', {})
    cur_commands = event_payload.get('text', '').split(' ')
    _transform_cur_commands(cur_commands)

    event_payload.update({
        'text': ' '.join(cur_commands)
    })

Transform any aliases in the current commands in interactive mode into their respective commands.
def alias_package(package, alias, extra_modules={}):
    path = package.__path__
    alias_prefix = alias + '.'
    prefix = package.__name__ + '.'

    for _, name, _ in pkgutil.walk_packages(path, prefix):
        if name.startswith('tango.databaseds.db_access.'):
            continue
        try:
            if name not in sys.modules:
                __import__(name)
        except ImportError:
            continue
        alias_name = name.replace(prefix, alias_prefix)
        sys.modules[alias_name] = sys.modules[name]

    for key, value in extra_modules.items():
        name = prefix + value
        if name not in sys.modules:
            __import__(name)
        if not hasattr(package, key):
            setattr(package, key, sys.modules[name])
        sys.modules[alias_prefix + key] = sys.modules[name]

    sys.modules[alias] = sys.modules[package.__name__]

Alias a Python package properly. It ensures that modules are not duplicated by trying to import and alias all the submodules recursively.
def _check(self, args):
    if sum(bool(args[arg]) for arg in self._mapping) > 1:
        raise DocoptExit(_('These options are mutually exclusive: {0}',
                           ', '.join(self._mapping)))
Exit in case of multiple exclusive arguments.
def get_snapshots(self, **kwargs):
    commits = self.repository.get_commits(**kwargs)
    snapshots = []
    for commit in commits:
        for key in ('committer_date', 'author_date'):
            commit[key] = datetime.datetime.fromtimestamp(commit[key + '_ts'])
        snapshot = GitSnapshot(commit)
        hasher = Hasher()
        hasher.add(snapshot.sha)
        snapshot.hash = hasher.digest.hexdigest()
        snapshot.project = self.project
        snapshot.pk = uuid.uuid4().hex
        snapshots.append(snapshot)
    return snapshots
Returns a list of snapshots in a given repository.
def in_query(expression):
    def _in(index, expression=expression):
        ev = expression() if callable(expression) else expression
        try:
            iter(ev)
        except TypeError:
            raise AttributeError('$in argument must be an iterable!')
        hashed_ev = [index.get_hash_for(v) for v in ev]
        store_keys = set()
        for value in hashed_ev:
            store_keys |= set(index.get_keys_for(value))
        return list(store_keys)
    return _in
Match any of the values that exist in an array specified in query.
def call(method, *args, **kwargs):
    kwargs = clean_kwargs(**kwargs)
    return getattr(pyeapi_device['connection'], method)(*args, **kwargs)
Calls an arbitrary pyeapi method.
def resizeEvent(self, event):
    curr_item = self.currentItem()
    self.closePersistentEditor(curr_item)

    super(XMultiTagEdit, self).resizeEvent(event)
Overloads the resize event to control if we are still editing. If we are resizing, then we are no longer editing.
def server_session(model=None, session_id=None, url="default",
                   relative_urls=False, resources="default"):
    if session_id is None:
        raise ValueError("Must supply a session_id")

    url = _clean_url(url)
    app_path = _get_app_path(url)

    elementid = make_id()
    modelid = "" if model is None else model.id

    src_path = _src_path(url, elementid)
    src_path += _process_app_path(app_path)
    src_path += _process_relative_urls(relative_urls, url)
    src_path += _process_session_id(session_id)
    src_path += _process_resources(resources)

    tag = AUTOLOAD_TAG.render(
        src_path=src_path,
        app_path=app_path,
        elementid=elementid,
        modelid=modelid,
    )

    return encode_utf8(tag)

Return a script tag that embeds content from a specific existing session on a Bokeh server.

This function is typically only useful for serving from a specific session that was previously created using the ``bokeh.client`` API.

Bokeh apps embedded using these methods will NOT set the browser window title.

.. note::
    Typically you will not want to save or re-use the output of this
    function for different or multiple page loads.

Args:
    model (Model or None, optional) : The object to render from the
        session, or None. (default: None)

        If None, the entire document will be rendered.

    session_id (str) : A server session ID

    url (str, optional) : A URL to a Bokeh application on a Bokeh server
        (default: "default")

        If ``"default"`` the default URL ``{DEFAULT_SERVER_HTTP_URL}``
        will be used.

    relative_urls (bool, optional) : Whether to use relative URLs for
        resources.

        If ``True`` the links generated for resources such as BokehJS
        JavaScript and CSS will be relative links.

        This should normally be set to ``False``, but must be set to
        ``True`` in situations where only relative URLs will work. E.g.
        when running the Bokeh server behind reverse-proxies under
        certain configurations.

    resources (str) : A string specifying what resources need to be
        loaded along with the document.

        If ``default`` then the default JS/CSS bokeh files will be
        loaded.

        If None then none of the resource files will be loaded. This is
        useful if you prefer to serve those resource files via other
        means (e.g. from a caching server). Be careful, however, that
        the resource files you'll load separately are of the same
        version as that of the server's, otherwise the rendering may
        not work correctly.

Returns:
    A ``<script>`` tag that will embed content from a Bokeh Server.

.. warning::
    It is typically a bad idea to re-use the same ``session_id`` for
    every page load. This is likely to create scalability and security
    problems, and will cause "shared Google doc" behavior, which is
    probably not desired.
def string_asset(class_obj: type) -> type:
    assert isinstance(class_obj, type), "class_obj is not a Class"
    global _string_asset_resource_type
    _string_asset_resource_type = class_obj
    return class_obj
Decorator to annotate the StringAsset class. Registers the decorated class as the StringAsset known type.
def from_custom_template(cls, searchpath, name):
    loader = ChoiceLoader([
        FileSystemLoader(searchpath),
        cls.loader,
    ])

    class MyStyler(cls):
        env = Environment(loader=loader)
        template = env.get_template(name)

    return MyStyler

Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment.

Parameters
----------
searchpath : str or list
    Path or paths of directories containing the templates
name : str
    Name of your custom template to use for rendering

Returns
-------
MyStyler : subclass of Styler
    Has the correct ``env`` and ``template`` class attributes set.
def maybe_pause_consumer(self):
    if self.load >= 1.0:
        if self._consumer is not None and not self._consumer.is_paused:
            _LOGGER.debug("Message backlog over load at %.2f, pausing.",
                          self.load)
            self._consumer.pause()
Check the current load and pause the consumer if needed.
def unique(iterable, key=None):
    ensure_iterable(iterable)
    key = hash if key is None else ensure_callable(key)

    def generator():
        seen = set()
        for elem in iterable:
            k = key(elem)
            if k not in seen:
                seen.add(k)
                yield elem

    return generator()

Removes duplicates from given iterable, using given key as criterion.

:param key: Key function which returns a hashable, uniquely identifying
    an object.
:return: Iterable with duplicates removed
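Assuming the module's ensure_iterable/ensure_callable helpers are in scope, usage looks like:

# Dedupe by absolute value; the first occurrence of each key wins.
print(list(unique([3, -3, 1, 2, -1], key=abs)))  # [3, 1, 2]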
def _bsd_addif(br, iface):
    kernel = __grains__['kernel']
    if kernel == 'NetBSD':
        cmd = _tool_path('brconfig')
        brcmd = 'add'
    else:
        cmd = _tool_path('ifconfig')
        brcmd = 'addem'

    if not br or not iface:
        return False

    return __salt__['cmd.run']('{0} {1} {2} {3}'.format(cmd, br, brcmd, iface),
                               python_shell=False)
Internal, adds an interface to a bridge
def special_validate(data, schema):
    jsonschema.validate(data, schema)
    data['special'] = str(data['name'] == 'Garfield').lower()

Custom validation function which inserts a special flag depending on the cat's name.
def resolve_response_data(head_key, data_key, data):
    new_data = []
    if isinstance(data, list):
        for data_row in data:
            if head_key in data_row and data_key in data_row[head_key]:
                if isinstance(data_row[head_key][data_key], list):
                    new_data += data_row[head_key][data_key]
                else:
                    new_data.append(data_row[head_key][data_key])
            elif data_key in data_row:
                return data_row[data_key]
    else:
        if head_key in data and data_key in data[head_key]:
            new_data += data[head_key][data_key]
        elif data_key in data:
            return data[data_key]
    return new_data

Resolves the responses you get from billomat.

If you have done a get_one_element request, you will get a dictionary. If you have done a get_all_elements request, you will get a list with all elements in it.

:param head_key: the head key, e.g: CLIENTS
:param data_key: the data key, e.g: CLIENT
:param data: the responses you got
:return: dict or list
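A usage sketch with a made-up billomat-style payload:

# A get_all_elements-style response resolves to the inner list.
payload = {'CLIENTS': {'CLIENT': [{'id': 1}, {'id': 2}]}}
print(resolve_response_data('CLIENTS', 'CLIENT', payload))
# [{'id': 1}, {'id': 2}]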
def update(self, version, reason=None):
    _check_version_format(version)
    return self.collection.update({'_id': 'manifest'}, {
        '$set': {'version': version},
        '$push': {'history': {
            'timestamp': datetime.utcnow(),
            'version': version,
            'reason': reason}}
    })

Modify the datamodel's manifest.

:param version: New version of the manifest
:param reason: Optional reason for the update (e.g. "Update from x.y.z")
def get_project(self, project_id):
    try:
        UUID(project_id, version=4)
    except ValueError:
        raise aiohttp.web.HTTPBadRequest(
            text="Project ID {} is not a valid UUID".format(project_id))

    if project_id not in self._projects:
        raise aiohttp.web.HTTPNotFound(
            text="Project ID {} doesn't exist".format(project_id))

    return self._projects[project_id]

Returns a Project instance.

:param project_id: Project identifier
:returns: Project instance
def getKeplerFov(fieldnum):
    info = getFieldInfo(fieldnum)
    ra, dec, scRoll = info["ra"], info["dec"], info["roll"]
    fovRoll = fov.getFovAngleFromSpacecraftRoll(scRoll)

    brokenChannels = [5, 6, 7, 8, 17, 18, 19, 20]
    if fieldnum > 10:
        brokenChannels.extend([9, 10, 11, 12])
    if fieldnum == 1000:
        brokenChannels = []

    return fov.KeplerFov(ra, dec, fovRoll, brokenChannels=brokenChannels)

Returns a `fov.KeplerFov` object for a given campaign.

Parameters
----------
fieldnum : int
    K2 Campaign number.

Returns
-------
fovobj : `fov.KeplerFov` object
    Details the footprint of the requested K2 campaign.
def make_parser():
    parser = ArgumentParser(
        description='Start an IRC bot instance from the command line.',
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '-v', '--version', action='version',
        version='{0} v{1}'.format(NAME, VERSION)
    )
    parser.add_argument(
        '-s', '--server', metavar='HOST', required=True,
        help='the host to connect to'
    )
    parser.add_argument(
        '-p', '--port', metavar='PORT', type=int, default=6667,
        help='the port the server is listening on'
    )
    parser.add_argument(
        '-n', '--nick', metavar='NAME', required=True,
        help="the bot's nickname"
    )
    parser.add_argument(
        '-N', '--name', metavar='NAME', default=NAME,
        help="the bot's real name"
    )
    parser.add_argument(
        '-c', '--channels', metavar='CHAN', nargs='*',
        help='join this channel upon connection'
    )
    parser.add_argument(
        '-l', '--log', metavar='LEVEL', default='INFO',
        help='minimal level for displayed logging messages'
    )
    parser.add_argument(
        '-S', '--ssl', action='store_true',
        help='connect to the server using SSL'
    )
    return parser
Creates an argument parser configured with options to run a bot from the command line. :return: configured argument parser :rtype: :class:`argparse.ArgumentParser`
def add_shellwidget(self, shellwidget):
    shellwidget_id = id(shellwidget)
    if shellwidget_id not in self.shellwidgets:
        self.options_button.setVisible(True)
        nsb = NamespaceBrowser(self, options_button=self.options_button)
        nsb.set_shellwidget(shellwidget)
        nsb.setup(**self.get_settings())
        nsb.sig_option_changed.connect(self.change_option)
        nsb.sig_free_memory.connect(self.free_memory)
        self.add_widget(nsb)
        self.shellwidgets[shellwidget_id] = nsb
        self.set_shellwidget_from_id(shellwidget_id)
        return nsb
Register shell with variable explorer. This function opens a new NamespaceBrowser for browsing the variables in the shell.
def chain_tasks(tasks):
    if tasks:
        previous_task = None
        for task in tasks:
            if task is not None:
                if previous_task is not None:
                    task.set_run_after(previous_task)
                previous_task = task
    return tasks

Chain given tasks. Set each task to run after its previous task.

:param tasks: Tasks list.
:return: Given tasks list.
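A minimal sketch with a hypothetical Task class that records its predecessor:

# chain_tasks skips None entries and links each task to the previous one.
class Task:
    def __init__(self, name):
        self.name = name
        self.run_after = None

    def set_run_after(self, task):
        self.run_after = task

a, b, c = Task('a'), Task('b'), Task('c')
chain_tasks([a, None, b, c])
print(b.run_after.name, c.run_after.name)  # a b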
async def dict(self, full):
    node = await self.open(full)
    return await HiveDict.anit(self, node)
Open a HiveDict at the given full path.
def write_metadata(self, key, values):
    values = Series(values)
    self.parent.put(self._get_metadata_path(key), values, format='table',
                    encoding=self.encoding, errors=self.errors,
                    nan_rep=self.nan_rep)

Write out a metadata array to the key as a fixed-format Series.

Parameters
----------
key : string
values : ndarray
def cancel(self, request, *args, **kwargs):
    status = self.get_object()
    status.cancel()
    serializer = StatusSerializer(status, context={'request': request})
    return Response(serializer.data)

Cancel the task associated with the specified status record.

Arguments:
    request (Request): A POST including a task status record ID

Returns:
    Response: A JSON response indicating whether the cancellation
    succeeded or not