Column schema:

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | lengths 40..40 |
| directory_id | string | lengths 40..40 |
| path | string | lengths 4..721 |
| content_id | string | lengths 40..40 |
| detected_licenses | list | lengths 0..57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5..91 |
| snapshot_id | string | lengths 40..40 |
| revision_id | string | lengths 40..40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 .. 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 .. 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 .. 2023-09-06 06:22:19 |
| github_id | int64 | 426 .. 681M |
| star_events_count | int64 | 101 .. 243k |
| fork_events_count | int64 | 0 .. 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 .. 2023-09-14 21:59:16 (nullable) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 .. 2023-08-10 11:14:58 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 .. 10.2M |
| extension | string | 115 classes |
| filename | string | lengths 3..113 |
| content | string | lengths 6..10.2M |
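Each row below is one source file described by these columns. As a minimal sketch of how a dump with this schema is typically consumed, assuming it is hosted as a Hugging Face dataset (the dataset repo id below is a placeholder assumption, not something stated on this page):

```python
# Illustrative only: streams rows with the columns listed above and keeps
# permissively licensed Python files. Substitute the placeholder dataset id
# with the actual source of this dump.
from datasets import load_dataset  # Hugging Face `datasets` library

DATASET_ID = "org/dataset-name"  # placeholder; not specified on this page

rows = load_dataset(DATASET_ID, split="train", streaming=True)
for row in rows:
    if row["license_type"] == "permissive" and row["language"] == "Python":
        print(row["repo_name"], row["path"], row["length_bytes"])
        source_code = row["content"]  # full file contents as a string
        break
```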
repo_name: scrapy/scrapy | path: /tests/test_link.py | filename: test_link.py | language: Python | extension: py | length_bytes: 1,900
blob_id: 8795f63c38b72c8cce9c6378c30086f374f50df5 | content_id: 7ba0851ae2e6f0a5155bf82ecebb8180bcc2b244 | directory_id: 3ee5bf329a2e58eb9f775ec5ee6a329fd3541e36
snapshot_id: 53bd79e500e2cb7441d33bfd61ba003962d5fb46 | revision_id: cddb8c15d66831dc4e1bc4b745fcc6c534bb03dc | branch_name: refs/heads/master
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-31T04:08:06.193342 | revision_date: 2023-08-30T18:29:54 | committer_date: 2023-08-30T18:29:54 | gha_event_created_at: 2023-09-14T12:08:07 | gha_created_at: 2010-02-22T02:01:14
github_id: 529,502 | star_events_count: 47,472 | fork_events_count: 12,120
content:
import unittest
from scrapy.link import Link
class LinkTest(unittest.TestCase):
def _assert_same_links(self, link1, link2):
self.assertEqual(link1, link2)
self.assertEqual(hash(link1), hash(link2))
def _assert_different_links(self, link1, link2):
self.assertNotEqual(link1, link2)
self.assertNotEqual(hash(link1), hash(link2))
def test_eq_and_hash(self):
l1 = Link("http://www.example.com")
l2 = Link("http://www.example.com/other")
l3 = Link("http://www.example.com")
self._assert_same_links(l1, l1)
self._assert_different_links(l1, l2)
self._assert_same_links(l1, l3)
l4 = Link("http://www.example.com", text="test")
l5 = Link("http://www.example.com", text="test2")
l6 = Link("http://www.example.com", text="test")
self._assert_same_links(l4, l4)
self._assert_different_links(l4, l5)
self._assert_same_links(l4, l6)
l7 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=False
)
l8 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=False
)
l9 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=True
)
l10 = Link(
"http://www.example.com", text="test", fragment="other", nofollow=False
)
self._assert_same_links(l7, l8)
self._assert_different_links(l7, l9)
self._assert_different_links(l7, l10)
def test_repr(self):
l1 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=True
)
l2 = eval(repr(l1))
self._assert_same_links(l1, l2)
def test_bytes_url(self):
with self.assertRaises(TypeError):
Link(b"http://www.example.com/\xc2\xa3")
repo_name: ray-project/ray | path: /rllib/utils/schedules/__init__.py | filename: __init__.py | language: Python | extension: py | length_bytes: 584
blob_id: dff28d17c01daa860148cacdc7c7c7abe91f1ae4 | content_id: 58503926968563c0dfcf6f1cf95ed04ca1a92cf1 | directory_id: 0760fb4901a75766921a205b55686d6d6f049b30
snapshot_id: a4bb6940b08b59a61ef0b8e755a52d8563a2f867 | revision_id: edba68c3e7cf255d1d6479329f305adb7fa4c3ed | branch_name: refs/heads/master
detected_licenses: ["MIT", "BSD-3-Clause", "Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-31T03:36:48.164405 | revision_date: 2023-08-31T03:20:38 | committer_date: 2023-08-31T03:20:38 | gha_event_created_at: 2023-09-14T21:48:14 | gha_created_at: 2016-10-25T19:38:30
github_id: 71,932,349 | star_events_count: 29,482 | fork_events_count: 5,669
content:
from ray.rllib.utils.schedules.schedule import Schedule
from ray.rllib.utils.schedules.constant_schedule import ConstantSchedule
from ray.rllib.utils.schedules.linear_schedule import LinearSchedule
from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule
from ray.rllib.utils.schedules.polynomial_schedule import PolynomialSchedule
from ray.rllib.utils.schedules.exponential_schedule import ExponentialSchedule
__all__ = [
"ConstantSchedule",
"ExponentialSchedule",
"LinearSchedule",
"Schedule",
"PiecewiseSchedule",
"PolynomialSchedule",
]
repo_name: SAP/InfraBox | path: /src/gc/gc.py | filename: gc.py | language: Python | extension: py | length_bytes: 5,549
blob_id: 33195d1a8e82b111ed9525141e6fa5a6b4fc2e3e | content_id: b28d4db2e8dd6dec5ef52806069322c16d55b11e | directory_id: 23cb4636462e4cd85f0ab238684b37f75a176396
snapshot_id: 0c4489de996438b332f1cea70e767ba3ff906117 | revision_id: 481f23d5fce7c9654bc0b0a5a54d4c77b728adf0 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-14T17:58:39.147263 | revision_date: 2023-07-24T07:12:50 | committer_date: 2023-07-24T07:12:50 | gha_event_created_at: 2023-09-12T05:52:12 | gha_created_at: 2018-03-22T14:06:46
github_id: 126,344,645 | star_events_count: 275 | fork_events_count: 81
content:
import time
from datetime import datetime, timedelta
from pyinfraboxutils import get_logger, get_env
from pyinfraboxutils import dbpool
from pyinfraboxutils.storage import storage, SWIFT
logger = get_logger("gc")
class GC(object):
def run(self):
# TODO: Delete storage objects: uploads, outputs
# TODO: Delete images from registry
while True:
db = dbpool.get()
try:
logger.info('Starting next GC run')
self._gc(db)
logger.info('Finished GC run')
logger.info('')
except Exception as e:
logger.exception(e)
finally:
dbpool.put(db)
time.sleep(3600)
def _gc(self, db):
self._gc_job_console_output(db)
self._gc_job_output(db)
self._gc_test_runs(db)
self._gc_orphaned_projects(db)
self._gc_storage_job_cache(db)
self._gc_swift()
def _gc_job_console_output(self, db):
# Delete the console output of jobs
# which are older than 30 days
r = db.execute_one_dict('''
SELECT count(*) as count
FROM job
WHERE created_at < NOW() - INTERVAL '30 days'
AND console != 'deleted'
''')
logger.info('Deleting console output of %s jobs', r['count'])
r = db.execute('''
UPDATE job
SET console = 'deleted'
WHERE created_at < NOW() - INTERVAL '30 days'
AND console != 'deleted'
''')
db.commit()
def _gc_test_runs(self, db):
# Delete the test_runs
# which are older than 30 days
r = db.execute_one_dict('''
SELECT count(*) as count
FROM test_run
WHERE timestamp < NOW() - INTERVAL '14 days'
''')
logger.info('Deleting %s test_runs', r['count'])
r = db.execute('''
DELETE
FROM test_run
WHERE timestamp < NOW() - INTERVAL '14 days'
''')
db.commit()
def _gc_job_output(self, db):
# Delete orphaned entries in the console table
# which are older than one day
r = db.execute_one_dict('''
SELECT count(*) count
FROM console
WHERE date < NOW() - INTERVAL '1 day'
''')
logger.info('Deleting %s orphaned console entries', r['count'])
r = db.execute('''
DELETE
FROM console
WHERE date < NOW() - INTERVAL '1 day'
''')
db.commit()
def _gc_orphaned_projects(self, db):
# All the orphaned rows after a
# project has been deleted
tables = [
'auth_token', 'build', 'collaborator', 'commit',
'job', 'job_badge', 'job_markup', 'measurement',
'pull_request', 'repository', 'secret', 'source_upload',
'test_run'
]
for t in tables:
self._gc_table_content_of_deleted_project(db, t)
def _gc_table_content_of_deleted_project(self, db, table):
r = db.execute_one_dict('''
SELECT count(*) as count
FROM %s
WHERE NOT EXISTS (
SELECT project.id
FROM project
WHERE %s.project_id = project.id
)
''' % (table, table))
logger.info('Deleting %s orphaned rows from %s', r['count'], table)
db.execute('''
DELETE
FROM %s
WHERE NOT EXISTS (
SELECT project.id
FROM project
WHERE %s.project_id = project.id
)
''' % (table, table))
db.commit()
def _gc_storage_source_upload(self):
pass
def _gc_swift(self):
if not isinstance(storage, SWIFT):
return
client = storage._get_client()
for folder in ("archive/", "output/", "upload/"):
_, data = client.get_container(storage.container, prefix=folder, full_listing=True)
now = datetime.now()
for obj in data:
# FIXME: when migrated to Python3, we can just use fromisoformat
# last_modified = datetime.fromisoformat(obj["last_modified"])
last_modified = datetime.strptime(obj["last_modified"], "%Y-%m-%dT%H:%M:%S.%f")
if now - last_modified > timedelta(days=7):
storage._delete(obj["name"])
logger.info("deleted obj {}".format(obj['name']))
def _gc_storage_job_cache(self, db):
# Delete all cache of all jobs which have not
# been executed in the last 7 days
r = db.execute_many_dict('''
SELECT DISTINCT project_id, name
FROM job
WHERE
created_at > NOW() - INTERVAL '14 days'
EXCEPT
SELECT DISTINCT project_id, name from job where created_at > NOW() - INTERVAL '7 days'
''')
logger.info('Deleting caches of %s jobs', len(r))
for j in r:
logger.info('Deleting cache %s/%s', j['project_id'], j['name'])
key = 'project_%s_job_%s.tar.snappy' % (j['project_id'], j['name'])
storage.delete_cache(key)
def main():
get_env('INFRABOX_DATABASE_DB')
get_env('INFRABOX_DATABASE_USER')
get_env('INFRABOX_DATABASE_PASSWORD')
get_env('INFRABOX_DATABASE_HOST')
get_env('INFRABOX_DATABASE_PORT')
gc = GC()
gc.run()
if __name__ == "__main__":
main()
repo_name: pyro-ppl/pyro | path: /pyro/contrib/tracking/dynamic_models.py | filename: dynamic_models.py | language: Python | extension: py | length_bytes: 16,560
blob_id: 5ce1f5a62003af0cdbca13ae8ef81b8597ad93ca | content_id: cdefc504bec394dbd998393718e4d3f853b57006 | directory_id: edc1134436a79ca883a0d25f3c8dfffc4235c514
snapshot_id: 2283d8ca528fc090c724a3a6e0f344e505ebbf77 | revision_id: 0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81 | branch_name: refs/heads/dev
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-18T00:35:28.014919 | revision_date: 2023-08-06T21:01:36 | committer_date: 2023-08-06T21:01:36 | gha_event_created_at: 2023-09-14T13:52:14 | gha_created_at: 2017-06-16T05:03:47
github_id: 94,506,832 | star_events_count: 3,647 | fork_events_count: 606
content:
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from abc import ABCMeta, abstractmethod
import torch
from torch import nn
from torch.nn import Parameter
import pyro.distributions as dist
from pyro.distributions.util import eye_like
class DynamicModel(nn.Module, metaclass=ABCMeta):
"""
Dynamic model interface.
:param dimension: native state dimension.
:param dimension_pv: PV state dimension.
:param num_process_noise_parameters: process noise parameter space dimension.
This for UKF applications. Can be left as ``None`` for EKF and most
other filters.
"""
def __init__(self, dimension, dimension_pv, num_process_noise_parameters=None):
self._dimension = dimension
self._dimension_pv = dimension_pv
self._num_process_noise_parameters = num_process_noise_parameters
super().__init__()
@property
def dimension(self):
"""
Native state dimension access.
"""
return self._dimension
@property
def dimension_pv(self):
"""
PV state dimension access.
"""
return self._dimension_pv
@property
def num_process_noise_parameters(self):
"""
Process noise parameters space dimension access.
"""
return self._num_process_noise_parameters
@abstractmethod
def forward(self, x, dt, do_normalization=True):
"""
Integrate native state ``x`` over time interval ``dt``.
:param x: current native state. If the DynamicModel is non-differentiable,
be sure to handle the case of ``x`` being augmented with process
noise parameters.
:param dt: time interval to integrate over.
:param do_normalization: whether to perform normalization on output, e.g.,
mod'ing angles into an interval.
:return: Native state x integrated dt into the future.
"""
raise NotImplementedError
def geodesic_difference(self, x1, x0):
"""
Compute and return the geodesic difference between 2 native states.
This is a generalization of the Euclidean operation ``x1 - x0``.
:param x1: native state.
:param x0: native state.
:return: Geodesic difference between native states ``x1`` and ``x2``.
"""
return x1 - x0 # Default to Euclidean behavior.
@abstractmethod
def mean2pv(self, x):
"""
Compute and return PV state from native state. Useful for combining
state estimates of different types in IMM (Interacting Multiple Model)
filtering.
:param x: native state estimate mean.
:return: PV state estimate mean.
"""
raise NotImplementedError
@abstractmethod
def cov2pv(self, P):
"""
Compute and return PV covariance from native covariance. Useful for
combining state estimates of different types in IMM (Interacting
Multiple Model) filtering.
:param P: native state estimate covariance.
:return: PV state estimate covariance.
"""
raise NotImplementedError
@abstractmethod
def process_noise_cov(self, dt=0.0):
"""
Compute and return process noise covariance (Q).
:param dt: time interval to integrate over.
:return: Read-only covariance (Q). For a DifferentiableDynamicModel, this is
the covariance of the native state ``x`` resulting from stochastic
integration (for use with EKF). Otherwise, it is the covariance
directly of the process noise parameters (for use with UKF).
"""
raise NotImplementedError
def process_noise_dist(self, dt=0.0):
"""
Return a distribution object of state displacement from the process noise
distribution over a time interval.
:param dt: time interval that process noise accumulates over.
:return: :class:`~pyro.distributions.torch.MultivariateNormal`.
"""
Q = self.process_noise_cov(dt)
return dist.MultivariateNormal(
torch.zeros(Q.shape[-1], dtype=Q.dtype, device=Q.device), Q
)
class DifferentiableDynamicModel(DynamicModel):
"""
DynamicModel for which state transition Jacobians can be efficiently
calculated, usu. analytically or by automatic differentiation.
"""
@abstractmethod
def jacobian(self, dt):
"""
Compute and return native state transition Jacobian (F) over time
interval ``dt``.
:param dt: time interval to integrate over.
:return: Read-only Jacobian (F) of integration map (f).
"""
raise NotImplementedError
class Ncp(DifferentiableDynamicModel):
"""
NCP (Nearly-Constant Position) dynamic model. May be subclassed, e.g., with
CWNV (Continuous White Noise Velocity) or DWNV (Discrete White Noise
Velocity).
:param dimension: native state dimension.
:param sv2: variance of velocity. Usually chosen so that the standard
deviation is roughly half of the max velocity one would ever expect
to observe.
"""
def __init__(self, dimension, sv2):
dimension_pv = 2 * dimension
super().__init__(dimension, dimension_pv, num_process_noise_parameters=1)
if not isinstance(sv2, torch.Tensor):
sv2 = torch.tensor(sv2)
self.sv2 = Parameter(sv2)
self._F_cache = eye_like(sv2, dimension) # State transition matrix cache
self._Q_cache = {} # Process noise cov cache
def forward(self, x, dt, do_normalization=True):
"""
Integrate native state ``x`` over time interval ``dt``.
:param x: current native state. If the DynamicModel is non-differentiable,
be sure to handle the case of ``x`` being augmented with process
noise parameters.
:param dt: time interval to integrate over.
do_normalization: whether to perform normalization on output, e.g.,
mod'ing angles into an interval. Has no effect for this subclass.
:return: Native state x integrated dt into the future.
"""
return x
def mean2pv(self, x):
"""
Compute and return PV state from native state. Useful for combining
state estimates of different types in IMM (Interacting Multiple Model)
filtering.
:param x: native state estimate mean.
:return: PV state estimate mean.
"""
with torch.no_grad():
x_pv = torch.zeros(2 * self._dimension, dtype=x.dtype, device=x.device)
x_pv[: self._dimension] = x
return x_pv
def cov2pv(self, P):
"""
Compute and return PV covariance from native covariance. Useful for
combining state estimates of different types in IMM (Interacting
Multiple Model) filtering.
:param P: native state estimate covariance.
:return: PV state estimate covariance.
"""
d = 2 * self._dimension
with torch.no_grad():
P_pv = torch.zeros(d, d, dtype=P.dtype, device=P.device)
P_pv[: self._dimension, : self._dimension] = P
return P_pv
def jacobian(self, dt):
"""
Compute and return cached native state transition Jacobian (F) over
time interval ``dt``.
:param dt: time interval to integrate over.
:return: Read-only Jacobian (F) of integration map (f).
"""
return self._F_cache
@abstractmethod
def process_noise_cov(self, dt=0.0):
"""
Compute and return cached process noise covariance (Q).
:param dt: time interval to integrate over.
:return: Read-only covariance (Q) of the native state ``x`` resulting from
stochastic integration (for use with EKF).
"""
raise NotImplementedError
class Ncv(DifferentiableDynamicModel):
"""
NCV (Nearly-Constant Velocity) dynamic model. May be subclassed, e.g., with
CWNA (Continuous White Noise Acceleration) or DWNA (Discrete White Noise
Acceleration).
:param dimension: native state dimension.
:param sa2: variance of acceleration. Usually chosen so that the standard
deviation is roughly half of the max acceleration one would ever
expect to observe.
"""
def __init__(self, dimension, sa2):
dimension_pv = dimension
super().__init__(dimension, dimension_pv, num_process_noise_parameters=1)
if not isinstance(sa2, torch.Tensor):
sa2 = torch.tensor(sa2)
self.sa2 = Parameter(sa2)
self._F_cache = {} # State transition matrix cache
self._Q_cache = {} # Process noise cov cache
def forward(self, x, dt, do_normalization=True):
"""
Integrate native state ``x`` over time interval ``dt``.
:param x: current native state. If the DynamicModel is non-differentiable,
be sure to handle the case of ``x`` being augmented with process
noise parameters.
:param dt: time interval to integrate over.
:param do_normalization: whether to perform normalization on output, e.g.,
mod'ing angles into an interval. Has no effect for this subclass.
:return: Native state x integrated dt into the future.
"""
F = self.jacobian(dt)
return F.mm(x.unsqueeze(1)).squeeze(1)
def mean2pv(self, x):
"""
Compute and return PV state from native state. Useful for combining
state estimates of different types in IMM (Interacting Multiple Model)
filtering.
:param x: native state estimate mean.
:return: PV state estimate mean.
"""
return x
def cov2pv(self, P):
"""
Compute and return PV covariance from native covariance. Useful for
combining state estimates of different types in IMM (Interacting
Multiple Model) filtering.
:param P: native state estimate covariance.
:return: PV state estimate covariance.
"""
return P
def jacobian(self, dt):
"""
Compute and return cached native state transition Jacobian (F) over
time interval ``dt``.
:param dt: time interval to integrate over.
:return: Read-only Jacobian (F) of integration map (f).
"""
if dt not in self._F_cache:
d = self._dimension
with torch.no_grad():
F = eye_like(self.sa2, d)
F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)
self._F_cache[dt] = F
return self._F_cache[dt]
@abstractmethod
def process_noise_cov(self, dt=0.0):
"""
Compute and return cached process noise covariance (Q).
:param dt: time interval to integrate over.
:return: Read-only covariance (Q) of the native state ``x`` resulting from
stochastic integration (for use with EKF).
"""
raise NotImplementedError
class NcpContinuous(Ncp):
"""
NCP (Nearly-Constant Position) dynamic model with CWNV (Continuous White
Noise Velocity).
References:
"Estimation with Applications to Tracking and Navigation" by Y. Bar-
Shalom et al, 2001, p.269.
:param dimension: native state dimension.
:param sv2: variance of velocity. Usually chosen so that the standard
deviation is roughly half of the max velocity one would ever expect
to observe.
"""
def process_noise_cov(self, dt=0.0):
"""
Compute and return cached process noise covariance (Q).
:param dt: time interval to integrate over.
:return: Read-only covariance (Q) of the native state ``x`` resulting from
stochastic integration (for use with EKF).
"""
if dt not in self._Q_cache:
# q: continuous-time process noise intensity with units
# length^2/time (m^2/s). Choose ``q`` so that changes in position,
# over a sampling period ``dt``, are roughly ``sqrt(q*dt)``.
q = self.sv2 * dt
Q = q * dt * eye_like(self.sv2, self._dimension)
self._Q_cache[dt] = Q
return self._Q_cache[dt]
class NcvContinuous(Ncv):
"""
NCV (Nearly-Constant Velocity) dynamic model with CWNA (Continuous White
Noise Acceleration).
References:
"Estimation with Applications to Tracking and Navigation" by Y. Bar-
Shalom et al, 2001, p.269.
:param dimension: native state dimension.
:param sa2: variance of acceleration. Usually chosen so that the standard
deviation is roughly half of the max acceleration one would ever
expect to observe.
"""
def process_noise_cov(self, dt=0.0):
"""
Compute and return cached process noise covariance (Q).
:param dt: time interval to integrate over.
:return: Read-only covariance (Q) of the native state ``x`` resulting from
stochastic integration (for use with EKF).
"""
if dt not in self._Q_cache:
with torch.no_grad():
d = self._dimension
dt2 = dt * dt
dt3 = dt2 * dt
Q = torch.zeros(d, d, dtype=self.sa2.dtype, device=self.sa2.device)
eye = eye_like(self.sa2, d // 2)
Q[: d // 2, : d // 2] = dt3 * eye / 3.0
Q[: d // 2, d // 2 :] = dt2 * eye / 2.0
Q[d // 2 :, : d // 2] = dt2 * eye / 2.0
Q[d // 2 :, d // 2 :] = dt * eye
# sa2 * dt is an intensity factor that changes in velocity
# over a sampling period ``dt``, ideally should be ~``sqrt(q*dt)``.
Q = Q * (self.sa2 * dt)
self._Q_cache[dt] = Q
return self._Q_cache[dt]
class NcpDiscrete(Ncp):
"""
NCP (Nearly-Constant Position) dynamic model with DWNV (Discrete White
Noise Velocity).
:param dimension: native state dimension.
:param sv2: variance of velocity. Usually chosen so that the standard
deviation is roughly half of the max velocity one would ever expect
to observe.
References:
"Estimation with Applications to Tracking and Navigation" by Y. Bar-
Shalom et al, 2001, p.273.
"""
def process_noise_cov(self, dt=0.0):
"""
Compute and return cached process noise covariance (Q).
:param dt: time interval to integrate over.
:return: Read-only covariance (Q) of the native state `x` resulting from
stochastic integration (for use with EKF).
"""
if dt not in self._Q_cache:
Q = self.sv2 * dt * dt * eye_like(self.sv2, self._dimension)
self._Q_cache[dt] = Q
return self._Q_cache[dt]
class NcvDiscrete(Ncv):
"""
NCV (Nearly-Constant Velocity) dynamic model with DWNA (Discrete White
Noise Acceleration).
:param dimension: native state dimension.
:param sa2: variance of acceleration. Usually chosen so that the standard
deviation is roughly half of the max acceleration one would ever
expect to observe.
References:
"Estimation with Applications to Tracking and Navigation" by Y. Bar-
Shalom et al, 2001, p.273.
"""
def process_noise_cov(self, dt=0.0):
"""
Compute and return cached process noise covariance (Q).
:param dt: time interval to integrate over.
:return: Read-only covariance (Q) of the native state `x` resulting from
stochastic integration (for use with EKF). (Note that this Q, modulo
numerical error, has rank `dimension/2`. So, it is only positive
semi-definite.)
"""
if dt not in self._Q_cache:
with torch.no_grad():
d = self._dimension
dt2 = dt * dt
dt3 = dt2 * dt
dt4 = dt2 * dt2
Q = torch.zeros(d, d, dtype=self.sa2.dtype, device=self.sa2.device)
Q[: d // 2, : d // 2] = 0.25 * dt4 * eye_like(self.sa2, d // 2)
Q[: d // 2, d // 2 :] = 0.5 * dt3 * eye_like(self.sa2, d // 2)
Q[d // 2 :, : d // 2] = 0.5 * dt3 * eye_like(self.sa2, d // 2)
Q[d // 2 :, d // 2 :] = dt2 * eye_like(self.sa2, d // 2)
Q = Q * self.sa2
self._Q_cache[dt] = Q
return self._Q_cache[dt]
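The `Ncv`/`NcvContinuous` classes above define a nearly-constant-velocity motion model with a closed-form transition matrix and CWNA process noise. A minimal usage sketch of that API, assuming Pyro is installed; the dimension, variance, and time-step values are illustrative:

```python
# Illustrative sketch of the NCV model API shown above; values are arbitrary.
import torch
from pyro.contrib.tracking.dynamic_models import NcvContinuous

model = NcvContinuous(dimension=4, sa2=2.0)    # [px, py, vx, vy] state; sa2 = accel. variance
x = torch.tensor([0.0, 0.0, 1.0, 0.5])         # current native state
dt = 0.1
x_next = model(x, dt)                          # constant-velocity propagation of the state
F = model.jacobian(dt)                         # 4x4 state-transition matrix
Q = model.process_noise_cov(dt)                # 4x4 CWNA process noise covariance
noise = model.process_noise_dist(dt).sample()  # one stochastic displacement draw
print(x_next, F.shape, Q.shape, noise.shape)
```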
repo_name: moderngl/moderngl-window | path: /tests/test_loaders_data.py | filename: test_loaders_data.py | language: Python | extension: py | length_bytes: 2,523
blob_id: 0f026aef71dfb1c652609b8148ea5f2c78dcb54b | content_id: fc931e38d0f3483761646c81eeb412b3bbd24727 | directory_id: 391dfd77c1bb85c08b4ead451ecdab0858eb141f
snapshot_id: 308682b5aa625dbb49ca554459bed9853a5e69c3 | revision_id: 200f2b9ea8b350b0ac9bb6a2d24310c0d8227794 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-05-28T00:33:49.924394 | revision_date: 2023-05-18T11:06:26 | committer_date: 2023-05-18T11:06:26 | gha_event_created_at: 2023-09-01T17:45:51 | gha_created_at: 2019-02-25T12:05:57
github_id: 172,498,670 | star_events_count: 205 | fork_events_count: 48
content:
from pathlib import Path
from unittest import TestCase
from moderngl_window import resources
from moderngl_window.meta import DataDescription
from moderngl_window.exceptions import ImproperlyConfigured
resources.register_dir((Path(__file__).parent / 'fixtures' / 'resources').resolve())
class DataLoaderTestcase(TestCase):
def test_txt(self):
"""Ensure correct loader is selected by looking at file extension (txt)"""
text = resources.data.load(DataDescription(path='data/data.txt'))
self.assertEqual(text, "Hello")
def test_json(self):
"""Ensure correct loader is selected by looking at file extension (json)"""
json = resources.data.load(DataDescription(path='data/data.json'))
self.assertEqual(json, {"test": "Hello"})
def test_binary_kind(self):
"""Loading a binary file"""
data = resources.data.load(DataDescription(path='data/data.bin', kind="binary"))
self.assertEqual(data, b'Hello')
def test_text_kind(self):
"""Load a e textfile"""
text = resources.data.load(DataDescription(path='data/data.txt', kind="text"))
self.assertEqual(text, "Hello")
def test_json_kind(self):
"""Load a json file"""
json = resources.data.load(DataDescription(path='data/data.json', kind="json"))
self.assertEqual(json, {"test": "Hello"})
def test_txt_not_found(self):
"""Ensure ImproperlyConfigured is raised if file is not found"""
with self.assertRaises(ImproperlyConfigured):
resources.data.load(DataDescription(path='data/notfound.txt'))
def test_json_not_found(self):
"""Ensure ImproperlyConfigured is raised if file is not found"""
with self.assertRaises(ImproperlyConfigured):
resources.data.load(DataDescription(path='data/notfound.json'))
def test_binary_not_found(self):
"""Ensure ImproperlyConfigured is raised if file is not found"""
with self.assertRaises(ImproperlyConfigured):
resources.data.load(DataDescription(path='data/notfound.bin', kind="binary"))
def test_binary_abspath(self):
"""Strip search directories and use absolute path"""
path = (Path(__file__).parent / "fixtures/resources/data/data.json").resolve()
with resources.temporary_dirs([]):
json = resources.data.load(DataDescription(path=path, kind="json"))
self.assertEqual(json, {"test": "Hello"})
repo_name: home-assistant/core | path: /homeassistant/components/met_eireann/config_flow.py | filename: config_flow.py | language: Python | extension: py | length_bytes: 1,736
blob_id: 987415ce7a23ebf49472175d7b3b47719f233227 | content_id: 909dd4ae955514fad90563d32bc0cedea227a8a5 | directory_id: 96dcea595e7c16cec07b3f649afd65f3660a0bad
snapshot_id: 3455eac2e9d925c92d30178643b1aaccf3a6484f | revision_id: 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | branch_name: refs/heads/dev
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-31T15:41:06.299469 | revision_date: 2023-08-31T14:50:53 | committer_date: 2023-08-31T14:50:53 | gha_event_created_at: 2023-09-14T21:50:15 | gha_created_at: 2013-09-17T07:29:48
github_id: 12,888,993 | star_events_count: 35,501 | fork_events_count: 20,617
content:
"""Config flow to configure Met Éireann component."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_ELEVATION, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, HOME_LOCATION_NAME
class MetEireannFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for Met Eireann component."""
VERSION = 1
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
# Check if an identical entity is already configured
await self.async_set_unique_id(
f"{user_input.get(CONF_LATITUDE)},{user_input.get(CONF_LONGITUDE)}"
)
self._abort_if_unique_id_configured()
else:
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME, default=HOME_LOCATION_NAME): str,
vol.Required(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Required(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
vol.Required(
CONF_ELEVATION, default=self.hass.config.elevation
): int,
}
),
errors=errors,
)
return self.async_create_entry(title=user_input[CONF_NAME], data=user_input)
repo_name: PyCQA/isort | path: /tests/integration/test_hypothesmith.py | filename: test_hypothesmith.py | language: Python | extension: py | length_bytes: 3,571
blob_id: d4ec7f7c0c7bd6fba7e62f14b70b8dcc0f86b1b9 | content_id: 92990aa65f16980b05828b68f1b13ad305076cc6 | directory_id: 7976cbc8e26c8db85d75820ff289b048f22d3986
snapshot_id: 5eaf38d78f5088f7568a2056fe7868c4f79abfdd | revision_id: e35a55f6ab8473003d68a8b2ffebdd08fbb2b61b | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-28T12:03:16.110191 | revision_date: 2023-08-20T20:18:05 | committer_date: 2023-08-20T20:18:05 | gha_event_created_at: 2023-09-06T18:45:21 | gha_created_at: 2013-09-02T22:22:53
github_id: 12,550,138 | star_events_count: 2,914 | fork_events_count: 312
content:
import ast
from typing import get_type_hints
import hypothesis
import libcst
from hypothesis import strategies as st
from hypothesmith import from_grammar, from_node
import isort
def _as_config(kw) -> isort.Config:
if "wrap_length" in kw and "line_length" in kw:
kw["wrap_length"], kw["line_length"] = sorted([kw["wrap_length"], kw["line_length"]])
try:
return isort.Config(**kw)
except ValueError:
kw["wrap_length"] = 0
return isort.Config(**kw)
def _record_targets(code: str, prefix: str = "") -> str:
# target larger inputs - the Hypothesis engine will do a multi-objective
# hill-climbing search using these scores to generate 'better' examples.
nodes = list(ast.walk(ast.parse(code)))
import_nodes = [n for n in nodes if isinstance(n, (ast.Import, ast.ImportFrom))]
uniq_nodes = {type(n) for n in nodes}
for value, label in [
(len(import_nodes), "total number of import nodes"),
(len(uniq_nodes), "number of unique ast node types"),
]:
hypothesis.target(float(value), label=prefix + label)
return code
def configs(**force_strategies: st.SearchStrategy[isort.Config]) -> st.SearchStrategy[isort.Config]:
"""Generate arbitrary Config objects."""
skip = {
"line_ending",
"sections",
"known_future_library",
"forced_separate",
"lines_before_imports",
"lines_after_imports",
"lines_between_sections",
"lines_between_types",
"sources",
"virtual_env",
"conda_env",
"directory",
"formatter",
"formatting_function",
}
inferred_kwargs = {
k: st.from_type(v)
for k, v in get_type_hints(isort.settings._Config).items()
if k not in skip
}
specific = {
"line_length": st.integers(0, 200),
"wrap_length": st.integers(0, 200),
"indent": st.integers(0, 20).map(lambda n: n * " "),
"default_section": st.sampled_from(sorted(isort.settings.KNOWN_SECTION_MAPPING)),
"force_grid_wrap": st.integers(0, 20),
"profile": st.sampled_from(sorted(isort.settings.profiles)),
"py_version": st.sampled_from(("auto",) + isort.settings.VALID_PY_TARGETS),
}
kwargs = {**inferred_kwargs, **specific, **force_strategies}
return st.fixed_dictionaries({}, optional=kwargs).map(_as_config) # type: ignore
st.register_type_strategy(isort.Config, configs())
@hypothesis.example("import A\nimportA\r\n\n", isort.Config(), False)
@hypothesis.given(
source_code=st.lists(
from_grammar(auto_target=False)
| from_node(auto_target=False)
| from_node(libcst.Import, auto_target=False)
| from_node(libcst.ImportFrom, auto_target=False),
min_size=1,
max_size=10,
).map("\n".join),
config=st.builds(isort.Config),
disregard_skip=st.booleans(),
)
@hypothesis.seed(235738473415671197623909623354096762459)
@hypothesis.settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow, hypothesis.HealthCheck.filter_too_much]
)
def test_isort_is_idempotent(source_code: str, config: isort.Config, disregard_skip: bool) -> None:
# NOTE: if this test finds a bug, please notify @Zac-HD so that it can be added to the
# Hypothesmith trophy case. This really helps with research impact evaluations!
_record_targets(source_code)
result = isort.code(source_code, config=config, disregard_skip=disregard_skip)
assert result == isort.code(result, config=config, disregard_skip=disregard_skip)
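The property exercised above is idempotence of `isort.code`. A minimal sketch of checking that property directly, assuming isort is installed; the input snippet is illustrative:

```python
# Illustrative sketch: isort.code should be a fixed point after one application.
import isort

source = "import sys\nimport os\n"
once = isort.code(source)
twice = isort.code(once)
assert once == twice  # idempotence: re-sorting already-sorted code changes nothing
print(once)           # "import os\nimport sys\n"
```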
repo_name: amperser/proselint | path: /tests/test_security_credit_card.py | filename: test_security_credit_card.py | language: Python | extension: py | length_bytes: 622
blob_id: 66fb01d2f370c910ab453c5a96af523222b7beaa | content_id: 1e004090cac539c6b5b9f9a59f221fb5ec8be2aa | directory_id: de84a9c84e9fd00fb1cf52c69381b20c96463f2b
snapshot_id: 23b7b1a0963bf036dde9326b3bb0bbbfcdf26c61 | revision_id: b5b7536bec5fd461e45cacad87c2aab9ea33ac35 | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-11T08:45:59.641463 | revision_date: 2023-07-27T13:28:58 | committer_date: 2023-07-27T13:28:58 | gha_event_created_at: 2023-09-10T20:53:11 | gha_created_at: 2015-01-14T01:24:07
github_id: 29,220,809 | star_events_count: 4,513 | fork_events_count: 267
content:
"""Tests for security.credit_card check."""
from proselint.checks.security import credit_card as chk
from .check import Check
class TestCheck(Check):
"""The test class for security.credit_card."""
__test__ = True
@property
def this_check(self):
"""Boilerplate."""
return chk
def test_smoke(self):
"""Basic smoke test for security.credit_card.
This makes use of a test MasterCard number.
"""
assert self.passes("""Smoke phrase with nothing flagged.""")
assert not self.passes(
"""My credit card number is 5555555555554444.""")
repo_name: pyro-ppl/pyro | path: /tests/infer/reparam/test_haar.py | filename: test_haar.py | language: Python | extension: py | length_bytes: 6,392
blob_id: debcdd3111dbd67b5970d5323b40c15de3b3785d | content_id: f981c9ee545d55141a15e381afdceeb426222696 | directory_id: edc1134436a79ca883a0d25f3c8dfffc4235c514
snapshot_id: 2283d8ca528fc090c724a3a6e0f344e505ebbf77 | revision_id: 0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81 | branch_name: refs/heads/dev
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-18T00:35:28.014919 | revision_date: 2023-08-06T21:01:36 | committer_date: 2023-08-06T21:01:36 | gha_event_created_at: 2023-09-14T13:52:14 | gha_created_at: 2017-06-16T05:03:47
github_id: 94,506,832 | star_events_count: 3,647 | fork_events_count: 606
content:
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from torch.autograd import grad
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.distributions.transforms.haar import HaarTransform
from pyro.infer.autoguide.initialization import InitMessenger, init_to_value
from pyro.infer.reparam import HaarReparam
from tests.common import assert_close
from .util import check_init_reparam
# Test helper to extract central moments from samples.
def get_moments(x):
n = x.size(0)
x = x.reshape(n, -1)
mean = x.mean(0)
x = x - mean
std = (x * x).mean(0).sqrt()
x = x / std
corr = (x.unsqueeze(-1) * x.unsqueeze(-2)).mean(0).reshape(-1)
return torch.cat([mean, std, corr])
@pytest.mark.parametrize("flip", [False, True])
@pytest.mark.parametrize(
"shape,dim",
[
((6,), -1),
(
(
2,
5,
),
-1,
),
((4, 2), -2),
((2, 3, 1), -2),
],
ids=str,
)
def test_normal(shape, dim, flip):
loc = torch.empty(shape).uniform_(-1.0, 1.0).requires_grad_()
scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()
def model():
with pyro.plate_stack("plates", shape[:dim]):
with pyro.plate("particles", 10000):
pyro.sample("x", dist.Normal(loc, scale).expand(shape).to_event(-dim))
value = poutine.trace(model).get_trace().nodes["x"]["value"]
expected_probe = get_moments(value)
rep = HaarReparam(dim=dim, flip=flip)
reparam_model = poutine.reparam(model, {"x": rep})
trace = poutine.trace(reparam_model).get_trace()
assert isinstance(trace.nodes["x_haar"]["fn"], dist.TransformedDistribution)
assert isinstance(trace.nodes["x"]["fn"], dist.Delta)
value = trace.nodes["x"]["value"]
actual_probe = get_moments(value)
assert_close(actual_probe, expected_probe, atol=0.1)
for actual_m, expected_m in zip(actual_probe[:10], expected_probe[:10]):
expected_grads = grad(expected_m.sum(), [loc, scale], retain_graph=True)
actual_grads = grad(actual_m.sum(), [loc, scale], retain_graph=True)
assert_close(actual_grads[0], expected_grads[0], atol=0.05)
assert_close(actual_grads[1], expected_grads[1], atol=0.05)
@pytest.mark.parametrize("flip", [False, True])
@pytest.mark.parametrize(
"shape,dim",
[
((6,), -1),
(
(
2,
5,
),
-1,
),
((4, 2), -2),
((2, 3, 1), -2),
],
ids=str,
)
def test_uniform(shape, dim, flip):
def model():
with pyro.plate_stack("plates", shape[:dim]):
with pyro.plate("particles", 10000):
pyro.sample("x", dist.Uniform(0, 1).expand(shape).to_event(-dim))
value = poutine.trace(model).get_trace().nodes["x"]["value"]
expected_probe = get_moments(value)
reparam_model = poutine.reparam(model, {"x": HaarReparam(dim=dim, flip=flip)})
trace = poutine.trace(reparam_model).get_trace()
assert isinstance(trace.nodes["x_haar"]["fn"], dist.TransformedDistribution)
assert isinstance(trace.nodes["x"]["fn"], dist.Delta)
value = trace.nodes["x"]["value"]
actual_probe = get_moments(value)
assert_close(actual_probe, expected_probe, atol=0.1)
@pytest.mark.parametrize("flip", [False, True])
@pytest.mark.parametrize(
"shape,dim",
[
((6,), -1),
(
(
2,
5,
),
-1,
),
((4, 2), -2),
((2, 3, 1), -2),
],
ids=str,
)
def test_init(shape, dim, flip):
loc = torch.empty(shape).uniform_(-1.0, 1.0).requires_grad_()
scale = torch.empty(shape).uniform_(0.5, 1.5).requires_grad_()
def model():
with pyro.plate_stack("plates", shape[:dim]):
return pyro.sample("x", dist.Normal(loc, scale).to_event(-dim))
check_init_reparam(model, HaarReparam(dim=dim, flip=flip))
def test_nested():
shape = (5, 6)
@poutine.reparam(config={"x": HaarReparam(dim=-1), "x_haar": HaarReparam(dim=-2)})
def model():
pyro.sample("x", dist.Normal(torch.zeros(shape), 1).to_event(2))
# Try without initialization, e.g. in AutoGuide._setup_prototype().
trace = poutine.trace(model).get_trace()
assert {"x", "x_haar", "x_haar_haar"}.issubset(trace.nodes)
assert trace.nodes["x"]["is_observed"]
assert trace.nodes["x_haar"]["is_observed"]
assert not trace.nodes["x_haar_haar"]["is_observed"]
assert trace.nodes["x"]["value"].shape == shape
# Try conditioning on x_haar_haar, e.g. in Predictive.
x = torch.randn(shape)
x_haar = HaarTransform(dim=-1)(x)
x_haar_haar = HaarTransform(dim=-2)(x_haar)
with poutine.condition(data={"x_haar_haar": x_haar_haar}):
trace = poutine.trace(model).get_trace()
assert {"x", "x_haar", "x_haar_haar"}.issubset(trace.nodes)
assert trace.nodes["x"]["is_observed"]
assert trace.nodes["x_haar"]["is_observed"]
assert trace.nodes["x_haar_haar"]["is_observed"]
assert_close(trace.nodes["x"]["value"], x)
assert_close(trace.nodes["x_haar"]["value"], x_haar)
assert_close(trace.nodes["x_haar_haar"]["value"], x_haar_haar)
# Try with custom initialization.
# This is required for autoguides and MCMC.
with InitMessenger(init_to_value(values={"x": x})):
trace = poutine.trace(model).get_trace()
assert {"x", "x_haar", "x_haar_haar"}.issubset(trace.nodes)
assert trace.nodes["x"]["is_observed"]
assert trace.nodes["x_haar"]["is_observed"]
assert not trace.nodes["x_haar_haar"]["is_observed"]
assert_close(trace.nodes["x"]["value"], x)
# Try conditioning on x.
x = torch.randn(shape)
with poutine.condition(data={"x": x}):
trace = poutine.trace(model).get_trace()
assert {"x", "x_haar", "x_haar_haar"}.issubset(trace.nodes)
assert trace.nodes["x"]["is_observed"]
assert trace.nodes["x_haar"]["is_observed"]
# TODO Decide whether it is worth fixing this failing assertion.
# See https://github.com/pyro-ppl/pyro/issues/2878
# assert trace.nodes["x_haar_haar"]["is_observed"]
assert_close(trace.nodes["x"]["value"], x)
repo_name: linkml/linkml | path: /tests/test_base/test_json.py | filename: test_json.py | language: Python | extension: py | length_bytes: 2,779
blob_id: 0032c123414027fa7194fcbfabc18abab081e05b | content_id: 0175355380054fe7ac74337ac42690cbc553b24c | directory_id: f7f3dd55fa7ab6833f3c2d8e457884c127cc203d
snapshot_id: 0fe41590ea729f10b1a6e2de4a85c585f284dc22 | revision_id: 2354a45838c6207b01ffabc6eda92512c3fb147b | branch_name: refs/heads/main
detected_licenses: ["CC0-1.0"] | license_type: permissive | gha_license_id: CC0-1.0 | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-17T05:59:08.486218 | revision_date: 2023-08-11T21:31:59 | committer_date: 2023-08-11T21:31:59 | gha_event_created_at: 2023-09-14T16:04:30 | gha_created_at: 2021-03-16T16:34:43
github_id: 348,419,208 | star_events_count: 198 | fork_events_count: 63
content:
import unittest
from linkml import (
LOCAL_ANNOTATIONS_YAML_FILE,
LOCAL_EXTENSIONS_YAML_FILE,
LOCAL_MAPPINGS_YAML_FILE,
LOCAL_METAMODEL_YAML_FILE,
LOCAL_TYPES_YAML_FILE,
METAANNOTATIONS_NAMESPACE,
METAEXTENSIONS_NAMESPACE,
METAMAPPING_NAMESPACE,
METAMODEL_NAMESPACE,
METATYPE_NAMESPACE,
)
from linkml.generators.jsonldgen import JSONLDGenerator
from tests.test_base.environment import env
from tests.utils.filters import json_metadata_filter
from tests.utils.generatortestcase import GeneratorTestCase
class JsonLDTestCase(GeneratorTestCase):
"""Generate the JSON for all of the models and compare them against what has been published
CJM note: switching off many tests for https://github.com/linkml/linkml/pull/924 too hard to debug
"""
env = env
def test_types_jsonld(self):
"""Build includes/types.jsonld"""
self.model_name = "types"
self.single_file_generator(
"json",
JSONLDGenerator,
yaml_file=LOCAL_TYPES_YAML_FILE,
serialize_args=dict(base=METATYPE_NAMESPACE),
filtr=json_metadata_filter,
)
def test_mappings_jsonld(self):
"""Build includes/mappings.jsonld"""
self.model_name = "mappings"
self.single_file_generator(
"json",
JSONLDGenerator,
yaml_file=LOCAL_MAPPINGS_YAML_FILE,
serialize_args=dict(base=METAMAPPING_NAMESPACE),
filtr=json_metadata_filter,
)
@unittest.skip("See note above")
def test_extensions_jsonld(self):
"""Build includes/extensions.jsonld"""
self.model_name = "extensions"
self.single_file_generator(
"json",
JSONLDGenerator,
yaml_file=LOCAL_EXTENSIONS_YAML_FILE,
serialize_args=dict(base=METAEXTENSIONS_NAMESPACE),
filtr=json_metadata_filter,
)
@unittest.skip("See note above")
def test_annotations_jsonld(self):
"""Build includes/annotations.jsonld"""
self.model_name = "annotations"
self.single_file_generator(
"json",
JSONLDGenerator,
yaml_file=LOCAL_ANNOTATIONS_YAML_FILE,
serialize_args=dict(base=METAANNOTATIONS_NAMESPACE),
filtr=json_metadata_filter,
)
@unittest.skip("See note above")
def test_metamodel_jsonld(self):
"""Build meta.jsonld"""
self.model_name = "meta"
self.single_file_generator(
"json",
JSONLDGenerator,
yaml_file=LOCAL_METAMODEL_YAML_FILE,
serialize_args=dict(base=METAMODEL_NAMESPACE),
filtr=json_metadata_filter,
)
if __name__ == "__main__":
unittest.main()
repo_name: dockcross/dockcross | path: /test/run.py | filename: run.py | language: Python | extension: py | length_bytes: 5,942
blob_id: b9ecdfebbf4e76fabf93a24159715124818a6e22 | content_id: fb61fc760dcd6c9ee6a37c73fe47d78d295e09b3 | directory_id: d46cfd98eb2e4d50645baa5dfbb4171b2646457c
snapshot_id: 4c978754c1d3b7f351f1028c6c4331f4498c72cd | revision_id: 85db3452be297dacec9f98dc1832801212254395 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Shell | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-18T11:09:28.057558 | revision_date: 2023-08-09T15:24:19 | committer_date: 2023-08-09T15:24:19 | gha_event_created_at: 2023-09-05T01:56:18 | gha_created_at: 2015-05-04T18:36:08
github_id: 35,052,287 | star_events_count: 2,955 | fork_events_count: 415
content:
#!/usr/bin/env python
"""Test that the toolchain can build executables.
Multiple build tools and languages are supported. If an emulator is available,
its ability to run the generated executables is also tested.
"""
import argparse
import glob
import os
import shutil
import subprocess
import sys
import tempfile
def test_none_build_system(build_dir, language, source, linker_flags, exe_suffix):
build_cmd = list()
if language == 'C':
compiler = os.getenv('CC', 'cc')
elif language == 'C++':
compiler = os.getenv('CXX', 'c++')
else:
print('Unknown language: ' + language)
return 1
build_cmd.append(compiler)
if linker_flags:
build_cmd.extend(linker_flags)
build_cmd.append(source)
build_cmd.append('-o')
build_cmd.append('a.out' + exe_suffix)
print('Building ' + source + ' by calling ' + compiler + '...')
print(' '.join(build_cmd))
sys.stdout.flush()
return subprocess.call(build_cmd)
def test_cmake_build_system(build_dir, language, source, emulator, linker_flags,
exe_suffix):
shutil.copy(source, build_dir)
print('Building ' + source + ' with CMake...')
with open('CMakeLists.txt', 'w') as fp:
fp.write('cmake_minimum_required(VERSION 3.0)\n')
fp.write('project(test-compiler)\n')
fp.write('add_executable(a.out ' + os.path.basename(source) + ')\n')
if emulator:
fp.write('enable_testing()\n')
fp.write('add_test(NAME emulator-in-cmake COMMAND a.out)\n')
os.mkdir('build')
os.chdir('build')
cmake_configuration_cmd = ['cmake', '..']
if linker_flags:
cmake_configuration_cmd.insert(1,
'-DCMAKE_EXE_LINKER_FLAGS="{0}"'.format(' '.join(linker_flags)))
print(' '.join(cmake_configuration_cmd))
sys.stdout.flush()
if subprocess.call(cmake_configuration_cmd):
return 1
if subprocess.call(['make', 'VERBOSE=1']):
return 1
if emulator:
if subprocess.call(['ctest']):
return 1
shutil.copy('a.out' + exe_suffix, build_dir)
return 0
def test_source(source, language, build_system, emulator, linker_flags,
exe_suffix, debug):
result = 0
cwd = os.getcwd()
build_dir = tempfile.mkdtemp()
os.chdir(build_dir)
if build_system == 'None':
result += test_none_build_system(build_dir, language, source,
linker_flags, exe_suffix)
elif build_system == 'CMake':
result += test_cmake_build_system(build_dir, language, source, emulator,
linker_flags, exe_suffix)
else:
print('Unknown build system: ' + build_system)
result += 1
if emulator:
cmd = emulator
cmd += ' ' + os.path.join(build_dir, 'a.out' + exe_suffix)
print('Running ' + cmd + '...')
sys.stdout.flush()
result += subprocess.call(cmd, shell=True)
os.chdir(cwd)
if not debug:
print('Deleting temporary build directory ' + build_dir)
shutil.rmtree(build_dir)
else:
print('Keeping temporary build directory ' + build_dir)
sys.stdout.flush()
return result
def test_build_system(test_dir, language, build_system, emulator, linker_flags,
exe_suffix, debug):
print('\n\n--------------------------------------------------------')
print('Testing ' + build_system + ' build system with the ' +
language + ' language\n')
sys.stdout.flush()
result = 0
for source in glob.glob(os.path.join(test_dir, language, '*')):
result += test_source(source, language, build_system, emulator,
linker_flags, exe_suffix, debug)
return result
def test_language(test_dir, language, build_systems, emulator, linker_flags,
exe_suffix, debug):
result = 0
for build_system in build_systems:
result += test_build_system(test_dir,
language,
build_system,
emulator,
linker_flags,
exe_suffix,
debug)
return result
def run_tests(test_dir, languages=('C', 'C++'), build_systems=('None', 'CMake'),
emulator=None, linker_flags=None, exe_suffix='', debug=False):
"""Run the tests found in test_dir where each directory corresponds to an
entry in languages. Every source within a language directory is built. The
output executable is also run with the emulator if provided."""
result = 0
for language in languages:
result += test_language(test_dir,
language,
build_systems,
emulator,
linker_flags,
exe_suffix,
debug)
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Test the cross-compiler toolchain.')
parser.add_argument('--languages', '-l', nargs='+', default=['C', 'C++'],
help='Languages to test. Options: C C++')
parser.add_argument('--build-systems', '-b', nargs='+', default=['None', 'CMake'],
help='Build systems to test. Options: None CMake')
parser.add_argument('--emulator', '-e',
help='Emulator used to test generated executables')
parser.add_argument('--linker-flags', '-w', nargs='+',
help='Extra compilation linker flags')
parser.add_argument('--exe-suffix', '-s', default='',
help='Suffix for generated executables')
parser.add_argument('--debug', '-d', action='store_true',
help='Do not remove temporary build directory')
args = parser.parse_args()
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.exit(run_tests(test_dir,
languages=args.languages,
build_systems=args.build_systems,
emulator=args.emulator,
linker_flags=args.linker_flags,
exe_suffix=args.exe_suffix,
debug=args.debug) != 0)
repo_name: pymanopt/pymanopt | path: /src/pymanopt/manifolds/sphere.py | filename: sphere.py | language: Python | extension: py | length_bytes: 6,938
blob_id: 5004ce50917c348007f06d0f11f8ead579066d8d | content_id: 74871a64a83b7007bc45700258119915989a3c28 | directory_id: 682459e3cea53ef14e531597dd612d4b5733fbea
snapshot_id: 4bcdc2983631befcf88b194449158d5163df37be | revision_id: acb52b216538ba5ed4871f025a0e49080b4475da | branch_name: refs/heads/master
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-09-01T01:50:27.468578 | revision_date: 2023-04-04T17:52:10 | committer_date: 2023-04-04T17:52:10 | gha_event_created_at: 2023-09-13T05:41:44 | gha_created_at: 2015-11-02T09:45:08
github_id: 45,385,612 | star_events_count: 647 | fork_events_count: 155
content:
import warnings
import numpy as np
from pymanopt.manifolds.manifold import RiemannianSubmanifold
from pymanopt.tools import extend_docstring
class _SphereBase(RiemannianSubmanifold):
def __init__(self, *shape, name, dimension):
if len(shape) == 0:
raise TypeError("Need at least one dimension.")
self._shape = shape
super().__init__(name, dimension)
@property
def typical_dist(self):
return np.pi
def inner_product(self, point, tangent_vector_a, tangent_vector_b):
return np.tensordot(
tangent_vector_a, tangent_vector_b, axes=tangent_vector_a.ndim
)
def norm(self, point, tangent_vector):
return np.linalg.norm(tangent_vector)
def dist(self, point_a, point_b):
inner = max(min(self.inner_product(point_a, point_a, point_b), 1), -1)
return np.arccos(inner)
def projection(self, point, vector):
return vector - self.inner_product(point, point, vector) * point
to_tangent_space = projection
def weingarten(self, point, tangent_vector, normal_vector):
return (
-self.inner_product(point, point, normal_vector) * tangent_vector
)
def exp(self, point, tangent_vector):
norm = self.norm(point, tangent_vector)
return point * np.cos(norm) + tangent_vector * np.sinc(norm / np.pi)
def retraction(self, point, tangent_vector):
return self._normalize(point + tangent_vector)
def log(self, point_a, point_b):
vector = self.projection(point_a, point_b - point_a)
distance = self.dist(point_a, point_b)
epsilon = np.finfo(np.float64).eps
factor = (distance + epsilon) / (self.norm(point_a, vector) + epsilon)
return factor * vector
def random_point(self):
point = np.random.normal(size=self._shape)
return self._normalize(point)
def random_tangent_vector(self, point):
vector = np.random.normal(size=self._shape)
return self._normalize(self.projection(point, vector))
def transport(self, point_a, point_b, tangent_vector_a):
return self.projection(point_b, tangent_vector_a)
def pair_mean(self, point_a, point_b):
return self._normalize(point_a + point_b)
def zero_vector(self, point):
return np.zeros(self._shape)
def _normalize(self, array):
return array / np.linalg.norm(array)
DOCSTRING_NOTE = """
Note:
The Weingarten map is taken from [AMT2013]_.
"""
@extend_docstring(DOCSTRING_NOTE)
class Sphere(_SphereBase):
r"""The sphere manifold.
Manifold of shape :math:`n_1 \times \ldots \times n_k` tensors with unit
Euclidean norm.
The norm is understood as the :math:`\ell_2`-norm of :math:`\E =
\R^{\sum_{i=1}^k n_i}` after identifying :math:`\R^{n_1 \times \ldots
\times n_k}` with :math:`\E`.
The metric is the one inherited from the usual Euclidean inner product that
induces :math:`\norm{\cdot}_2` on :math:`\E` such that the manifold forms a
Riemannian submanifold of Euclidean space.
Args:
shape: The shape of tensors.
"""
def __init__(self, *shape: int):
if len(shape) == 0:
raise TypeError("Need shape parameters.")
if len(shape) == 1:
(n,) = shape
name = f"Sphere manifold of {n}-vectors"
elif len(shape) == 2:
m, n = shape
name = f"Sphere manifold of {m}x{n} matrices"
else:
name = f"Sphere manifold of shape {shape} tensors"
dimension = np.prod(shape) - 1
super().__init__(*shape, name=name, dimension=dimension)
class _SphereSubspaceIntersectionManifold(_SphereBase):
def __init__(self, projector, name, dimension):
m, n = projector.shape
assert m == n, "projection matrix is not square"
if dimension == 0:
warnings.warn(
"Intersected subspace is 1-dimensional. The manifold "
"therefore has dimension 0 as it only consists of isolated "
"points"
)
self._subspace_projector = projector
super().__init__(n, name=name, dimension=dimension)
def _validate_span_matrix(self, matrix):
if len(matrix.shape) != 2:
raise ValueError("Input array must be 2-dimensional")
num_rows, num_columns = matrix.shape
if num_rows < num_columns:
raise ValueError(
"The span matrix cannot have fewer rows than columns"
)
def projection(self, point, vector):
return self._subspace_projector @ super().projection(point, vector)
def random_point(self):
point = super().random_point()
return self._normalize(self._subspace_projector @ point)
def random_tangent_vector(self, point):
vector = super().random_tangent_vector(point)
return self._normalize(self._subspace_projector @ vector)
@extend_docstring(DOCSTRING_NOTE)
class SphereSubspaceIntersection(_SphereSubspaceIntersectionManifold):
r"""Sphere-subspace intersection manifold.
Manifold of :math:`n`-dimensional vectors with unit :math:`\ell_2`-norm
intersecting an :math:`r`-dimensional subspace of :math:`\R^n`.
The subspace is represented by a matrix of size ``n x r`` whose columns
span the subspace.
Args:
matrix: Matrix whose columns span the intersecting subspace.
"""
def __init__(self, matrix):
self._validate_span_matrix(matrix)
m = matrix.shape[0]
q, _ = np.linalg.qr(matrix)
projector = q @ q.T
subspace_dimension = np.linalg.matrix_rank(projector)
name = (
f"Sphere manifold of {m}-dimensional vectors intersecting a "
f"{subspace_dimension}-dimensional subspace"
)
dimension = subspace_dimension - 1
super().__init__(projector, name, dimension)
@extend_docstring(DOCSTRING_NOTE)
class SphereSubspaceComplementIntersection(
_SphereSubspaceIntersectionManifold
):
r"""Sphere-subspace complement intersection manifold.
Manifold of :math:`n`-dimensional vectors with unit :math:`\ell_2`-norm
that are orthogonal to an :math:`r`-dimensional subspace of :math:`\R^n`.
The subspace is represented by a matrix of size ``n x r`` whose columns
span the subspace.
Args:
matrix: Matrix whose columns span the subspace.
"""
def __init__(self, matrix):
self._validate_span_matrix(matrix)
m = matrix.shape[0]
q, _ = np.linalg.qr(matrix)
projector = np.eye(m) - q @ q.T
subspace_dimension = np.linalg.matrix_rank(projector)
name = (
f"Sphere manifold of {m}-dimensional vectors orthogonal "
f"to a {subspace_dimension}-dimensional subspace"
)
dimension = subspace_dimension - 1
super().__init__(projector, name, dimension)
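A minimal sketch exercising the `Sphere` manifold API defined above, assuming pymanopt and NumPy are installed; the dimension is illustrative:

```python
# Illustrative sketch of the Sphere manifold operations shown above.
import numpy as np
from pymanopt.manifolds import Sphere

sphere = Sphere(3)                                   # unit vectors in R^3
x = sphere.random_point()                            # ||x|| == 1
v = sphere.projection(x, np.random.normal(size=3))   # tangent vector at x
y = sphere.exp(x, v)                                 # geodesic step; y stays on the sphere
print(np.linalg.norm(x), np.linalg.norm(y), sphere.dist(x, y))
```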
repo_name: jart/cosmopolitan | path: /third_party/python/Lib/distutils/tests/test_version.py | filename: test_version.py | language: Python | extension: py | length_bytes: 2,614
blob_id: 78c42c5678ba29790b967202b012e67ff62c1bb9 | content_id: 15f14c7de3f1256aa74199ec01ea0bc978db6fc2 | directory_id: fdbb74a95924e2677466614f6ab6e2bb13b2a95a
snapshot_id: fb11b5658939023977060a7c6c71a74093d9cb44 | revision_id: 0d748ad58e1063dd1f8560f18a0c75293b9415b7 | branch_name: refs/heads/master
detected_licenses: ["Python-2.0", "GPL-1.0-or-later", "LicenseRef-scancode-python-cwi", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-other-copyleft", "ISC"] | license_type: permissive | gha_license_id: ISC | gha_language: C | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-09-06T09:17:29.303607 | revision_date: 2023-09-02T03:49:13 | committer_date: 2023-09-02T03:50:18 | gha_event_created_at: 2023-09-14T17:47:58 | gha_created_at: 2020-06-15T14:16:13
github_id: 272,457,606 | star_events_count: 11,887 | fork_events_count: 435
content:
"""Tests for distutils.version."""
import unittest
from distutils.version import LooseVersion
from distutils.version import StrictVersion
from test.support import run_unittest
class VersionTestCase(unittest.TestCase):
def test_prerelease(self):
version = StrictVersion('1.2.3a1')
self.assertEqual(version.version, (1, 2, 3))
self.assertEqual(version.prerelease, ('a', 1))
self.assertEqual(str(version), '1.2.3a1')
version = StrictVersion('1.2.0')
self.assertEqual(str(version), '1.2')
def test_cmp_strict(self):
versions = (('1.5.1', '1.5.2b2', -1),
('161', '3.10a', ValueError),
('8.02', '8.02', 0),
('3.4j', '1996.07.12', ValueError),
('3.2.pl0', '3.1.1.6', ValueError),
('2g6', '11g', ValueError),
('0.9', '2.2', -1),
('1.2.1', '1.2', 1),
('1.1', '1.2.2', -1),
('1.2', '1.1', 1),
('1.2.1', '1.2.2', -1),
('1.2.2', '1.2', 1),
('1.2', '1.2.2', -1),
('0.4.0', '0.4', 0),
('1.13++', '5.5.kw', ValueError))
for v1, v2, wanted in versions:
try:
res = StrictVersion(v1)._cmp(StrictVersion(v2))
except ValueError:
if wanted is ValueError:
continue
else:
raise AssertionError(("cmp(%s, %s) "
"shouldn't raise ValueError")
% (v1, v2))
self.assertEqual(res, wanted,
'cmp(%s, %s) should be %s, got %s' %
(v1, v2, wanted, res))
def test_cmp(self):
versions = (('1.5.1', '1.5.2b2', -1),
('161', '3.10a', 1),
('8.02', '8.02', 0),
('3.4j', '1996.07.12', -1),
('3.2.pl0', '3.1.1.6', 1),
('2g6', '11g', -1),
('0.960923', '2.2beta29', -1),
('1.13++', '5.5.kw', -1))
for v1, v2, wanted in versions:
res = LooseVersion(v1)._cmp(LooseVersion(v2))
self.assertEqual(res, wanted,
'cmp(%s, %s) should be %s, got %s' %
(v1, v2, wanted, res))
def test_suite():
return unittest.makeSuite(VersionTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
repo_name: kamyu104/LeetCode-Solutions | path: /Python/minimum-falling-path-sum.py | filename: minimum-falling-path-sum.py | language: Python | extension: py | length_bytes: 326
blob_id: a819a4294ccd816f01b7fd23d1fe6f130e96a96c | content_id: f9232388f6feb02c1f7323a57bb2a8648acfb183 | directory_id: 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
snapshot_id: f54822059405ef4df737d2e9898b024f051fd525 | revision_id: 4dc4e6642dc92f1983c13564cc0fd99917cab358 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: C++ | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-09-02T13:48:26.830566 | revision_date: 2023-08-28T10:11:12 | committer_date: 2023-08-28T10:11:12 | gha_event_created_at: 2023-05-31T06:10:33 | gha_created_at: 2018-10-11T17:38:35
github_id: 152,631,182 | star_events_count: 4,549 | fork_events_count: 1,651
content:
# Time: O(n^2)
# Space: O(1)
class Solution(object):
def minFallingPathSum(self, A):
"""
:type A: List[List[int]]
:rtype: int
"""
for i in xrange(1, len(A)):
for j in xrange(len(A[i])):
A[i][j] += min(A[i-1][max(j-1, 0):j+2])
return min(A[-1])
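The solution above is an in-place bottom-up DP written for Python 2 (`xrange`). A small worked example of the same recurrence, rewritten with `range` so it runs on Python 3; the input matrix is illustrative:

```python
# Same DP as above, Python 3: each cell adds the cheapest reachable cell
# from the previous row (directly above or diagonally adjacent).
def min_falling_path_sum(matrix):
    for i in range(1, len(matrix)):
        for j in range(len(matrix[i])):
            matrix[i][j] += min(matrix[i - 1][max(j - 1, 0):j + 2])
    return min(matrix[-1])

print(min_falling_path_sum([[2, 1, 3], [6, 5, 4], [7, 8, 9]]))  # -> 13
```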
repo_name: fledge-iot/fledge | path: /python/fledge/services/core/api/snapshot/plugins.py | filename: plugins.py | language: Python | extension: py | length_bytes: 5,801
blob_id: 5d960cc204b148f51e80b203cb56547b0e017a38 | content_id: dc2769a577f22f700438408578f4049fdccb7d93 | directory_id: ff4999ae662707a882085b75b8cc377a714f747c
snapshot_id: 095631d6a9be10444cdde0a7a3acbb039aabc8bd | revision_id: d4804240a653ab6e324b5949069c0e17bf19e374 | branch_name: refs/heads/develop
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-22T05:29:07.549634 | revision_date: 2023-08-21T11:22:19 | committer_date: 2023-08-21T11:22:19 | gha_event_created_at: 2023-09-14T10:42:06 | gha_created_at: 2019-12-03T01:58:59
github_id: 225,508,004 | star_events_count: 111 | fork_events_count: 44
content:
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
import os
from aiohttp import web
from fledge.services.core.snapshot import SnapshotPluginBuilder
from fledge.common.common import _FLEDGE_ROOT, _FLEDGE_DATA
from fledge.common.web.middleware import has_permission
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_help = """
-------------------------------------------------------------------------
| GET POST | /fledge/snapshot/plugins |
| PUT DELETE | /fledge/snapshot/plugins/{id} |
-------------------------------------------------------------------------
"""
@has_permission("admin")
async def get_snapshot(request):
""" get list of available snapshots
:Example:
curl -X GET http://localhost:8081/fledge/snapshot/plugins
When auth is mandatory:
curl -X GET http://localhost:8081/fledge/snapshot/plugins -H "authorization: <token>"
"""
# Get snapshot directory path
snapshot_dir = _get_snapshot_dir()
valid_extension = '.tar.gz'
sorted_list = []
if os.path.isdir(snapshot_dir):
for root, dirs, files in os.walk(snapshot_dir):
valid_files = list(
filter(lambda f: f.endswith(valid_extension), files))
list_files = list(map(
lambda x: {"id": x.split("snapshot-plugin-")[1].split(".tar.gz")[0],
"name": x}, valid_files))
sorted_list = sorted(list_files, key=lambda k: k['id'], reverse=True)
return web.json_response({"snapshots": sorted_list})
@has_permission("admin")
async def post_snapshot(request):
""" Create a snapshot by name
:Example:
curl -X POST http://localhost:8081/fledge/snapshot/plugins
When auth is mandatory:
curl -X POST http://localhost:8081/fledge/snapshot/plugins -H "authorization: <token>"
"""
try:
snapshot_dir = _get_snapshot_dir()
snapshot_id, snapshot_name = await SnapshotPluginBuilder(
snapshot_dir).build()
except Exception as ex:
raise web.HTTPInternalServerError(
reason='Snapshot could not be created. {}'.format(str(ex)))
else:
return web.json_response({
"message": "snapshot id={}, file={} created successfully.".format(
snapshot_id, snapshot_name)})
@has_permission("admin")
async def put_snapshot(request):
"""extract a snapshot
:Example:
curl -X PUT http://localhost:8081/fledge/snapshot/plugins/1554204238
When auth is mandatory:
curl -X PUT http://localhost:8081/fledge/snapshot/plugins/1554204238 -H "authorization: <token>"
"""
try:
snapshot_id = request.match_info.get('id', None)
snapshot_name = "snapshot-plugin-{}.tar.gz".format(snapshot_id)
try:
snapshot_id = int(snapshot_id)
except:
raise ValueError('Invalid snapshot id: {}'.format(snapshot_id))
if not os.path.isdir(_get_snapshot_dir()):
raise web.HTTPNotFound(reason="No snapshot found.")
snapshot_dir = _get_snapshot_dir()
for root, dirs, files in os.walk(snapshot_dir):
if str(snapshot_name) not in files:
raise web.HTTPNotFound(reason='{} not found'.format(snapshot_name))
p = "{}/{}".format(snapshot_dir, snapshot_name)
SnapshotPluginBuilder(snapshot_dir).extract_files(p)
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as ex:
raise web.HTTPInternalServerError(
reason='Snapshot {} could not be restored. {}'.format(snapshot_name,
str(ex)))
else:
return web.json_response(
{"message": "snapshot {} restored successfully.".format(
snapshot_name)})
@has_permission("admin")
async def delete_snapshot(request):
"""delete a snapshot
:Example:
curl -X DELETE http://localhost:8081/fledge/snapshot/plugins/1554204238
When auth is mandatory:
curl -X DELETE http://localhost:8081/fledge/snapshot/plugins/1554204238 -H "authorization: <token>"
"""
try:
snapshot_id = request.match_info.get('id', None)
snapshot_name = "snapshot-plugin-{}.tar.gz".format(snapshot_id)
try:
snapshot_id = int(snapshot_id)
except:
raise ValueError('Invalid snapshot id: {}'.format(snapshot_id))
if not os.path.isdir(_get_snapshot_dir()):
raise web.HTTPNotFound(reason="No snapshot found.")
snapshot_dir = _get_snapshot_dir()
for root, dirs, files in os.walk(_get_snapshot_dir()):
if str(snapshot_name) not in files:
raise web.HTTPNotFound(reason='{} not found'.format(snapshot_name))
p = "{}/{}".format(snapshot_dir, snapshot_name)
os.remove(p)
except ValueError as ex:
raise web.HTTPBadRequest(reason=str(ex))
except Exception as ex:
raise web.HTTPInternalServerError(
reason='Snapshot {} could not be deleted. {}'.format(snapshot_name,
str(ex)))
else:
return web.json_response(
{"message": "snapshot {} deleted successfully.".format(
snapshot_name)})
def _get_snapshot_dir():
if _FLEDGE_DATA:
snapshot_dir = os.path.expanduser(_FLEDGE_DATA + '/snapshots/plugins')
else:
snapshot_dir = os.path.expanduser(
_FLEDGE_ROOT + '/data/snapshots/plugins')
return snapshot_dir
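# --- Illustrative sketch, not part of the original module ---
# The handlers above are plain aiohttp coroutines. One hypothetical way to wire
# them into an application, mirroring the route table in _help, is sketched
# below; the actual registration lives elsewhere in the Fledge core service.
def _example_app():
app = web.Application()
app.router.add_route('GET', '/fledge/snapshot/plugins', get_snapshot)
app.router.add_route('POST', '/fledge/snapshot/plugins', post_snapshot)
app.router.add_route('PUT', '/fledge/snapshot/plugins/{id}', put_snapshot)
app.router.add_route('DELETE', '/fledge/snapshot/plugins/{id}', delete_snapshot)
return app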
|
500a717ef9f4769d4ad282eefb7b42687e96a2f3
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/stubs/IfNameMain.py
|
eee945a80e548f2fd8b9a581b923abf59bd24496
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
IfNameMain.py
|
if __name__ == '__main__':
import sys
xyzzy = None
|
39ce9550e21de7fb3b4504066506c88f0a96a259
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/gpu/config/DEPS
|
caa21da0ae6f22d9d7efa21ca773e0d165a03067
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 233
|
DEPS
|
include_rules = [
"+components/crash/core/common/crash_key.h",
"+media/media_buildflags.h",
"+skia/buildflags.h",
"+third_party/skia",
"+third_party/vulkan_headers/include/vulkan/vulkan.h",
"+third_party/dawn/include",
]
|
|
2ecd586f842d3197937d1452c66eca5c818f9b47
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/tools/blinkpy/tool/commands/queries_unittest.py
|
eba66b53e4b462f5a909811b3cfd78eb71db1314
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 8,121
|
py
|
queries_unittest.py
|
# Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import unittest
from blinkpy.common.path_finder import WEB_TESTS_LAST_COMPONENT
from blinkpy.common.system.output_capture import OutputCapture
from blinkpy.tool.commands.queries import PrintBaselines, PrintExpectations
from blinkpy.tool.mock_tool import MockBlinkTool
class PrintExpectationsTest(unittest.TestCase):
def run_test(self,
tests,
expected_stdout,
platform='test-win-win7',
**kwargs):
options_defaults = {
'all': False,
'csv': False,
'full': False,
'platform': platform,
'include_keyword': [],
'exclude_keyword': [],
'paths': False,
}
options_defaults.update(kwargs)
options = optparse.Values(dict(**options_defaults))
tool = MockBlinkTool()
tool.port_factory.all_port_names = lambda: [
'test-linux-trusty', 'test-linux-precise',
'test-mac-mac10.11', 'test-mac-mac10.10',
'test-win-win10', 'test-win-win7'
]
command = PrintExpectations()
oc = OutputCapture()
try:
oc.capture_output()
command.execute(options, tests, tool)
finally:
stdout, _, _ = oc.restore_output()
self.assertMultiLineEqual(stdout, expected_stdout)
def test_basic(self):
self.run_test(
['failures/expected/text.html', 'failures/expected/timeout.html'],
('// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'
'failures/expected/timeout.html [ Timeout ]\n'))
def test_multiple(self):
self.run_test([
'failures/unexpected/*/text.html', 'failures/expected/timeout.html'
], ('// For test-win-win10\n'
'failures/expected/timeout.html [ Timeout ]\n'
'failures/unexpected/\*/text.html [ Pass ]\n'
'\n'
'// For test-win-win7\n'
'failures/expected/timeout.html [ Timeout ]\n'
'failures/unexpected/\*/text.html [ Pass ]\n'),
platform='test-win-*')
def test_full(self):
self.run_test(
['failures/expected/text.html', 'failures/expected/timeout.html'],
('// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'
'failures/expected/timeout.html [ Timeout ]\n'),
full=True)
def test_exclude(self):
self.run_test(
['failures/expected/text.html', 'failures/expected/crash.html'],
('// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'),
exclude_keyword=['crash'])
def test_include(self):
self.run_test(
['failures/expected/text.html', 'failures/expected/crash.html'],
('// For test-win-win7\n'
'failures/expected/crash.html [ Crash ]\n'),
include_keyword=['crash'])
def test_csv(self):
self.run_test(
['failures/expected/text.html', 'failures/expected/image.html'],
('test-win-win7,failures/expected/image.html,,,FAIL\n'
'test-win-win7,failures/expected/text.html,,,FAIL\n'),
csv=True)
def test_paths(self):
self.run_test([],
(WEB_TESTS_LAST_COMPONENT + '/TestExpectations\n' +
WEB_TESTS_LAST_COMPONENT + '/NeverFixTests\n' +
WEB_TESTS_LAST_COMPONENT + '/StaleTestExpectations\n' +
WEB_TESTS_LAST_COMPONENT + '/SlowTests\n'),
paths=True)
class PrintBaselinesTest(unittest.TestCase):
def setUp(self):
self.oc = None
self.tool = MockBlinkTool()
self.test_port = self.tool.port_factory.get('test-win-win7')
self.tool.port_factory.get = lambda port_name=None: self.test_port
self.tool.port_factory.all_port_names = lambda: [
'test-linux-trusty', 'test-linux-precise',
'test-mac-mac10.11', 'test-mac-mac10.10',
'test-win-win10', 'test-win-win7'
]
def tearDown(self):
if self.oc:
self.restore_output()
def capture_output(self):
self.oc = OutputCapture()
self.oc.capture_output()
def restore_output(self):
stdout, stderr, logs = self.oc.restore_output()
self.oc = None
return (stdout, stderr, logs)
def test_basic(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({
'all': False,
'include_virtual_tests': False,
'csv': False,
'platform': None
})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout, ('// For test-win-win7\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'))
def test_multiple(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({
'all': False,
'include_virtual_tests': False,
'csv': False,
'platform': 'test-win-*'
})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout, ('// For test-win-win10\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'
'\n'
'// For test-win-win7\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'))
def test_csv(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({
'all': False,
'platform': '*win7',
'csv': True,
'include_virtual_tests': False
})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout, (
'test-win-win7,passes/text.html,None,png,passes/text-expected.png,None\n'
'test-win-win7,passes/text.html,None,txt,passes/text-expected.txt,None\n'
))
|
8c9e87f9bb54e594c9441c1c8ae696c503fbfcd3
|
091155389673325cfe8b0da3dc64c113f1ded707
|
/cvpods/evaluation/__init__.py
|
8e4ebf84b51e301b46498483eaba3ac081c9d8d8
|
[
"Apache-2.0"
] |
permissive
|
Megvii-BaseDetection/cvpods
|
7b7c808257b757d7f94d520ea03b370105fb05eb
|
2deea5dc659371318c8a570c644201d913a83027
|
refs/heads/master
| 2023-03-22T00:26:06.248877
| 2023-03-10T10:05:26
| 2023-03-10T10:05:26
| 318,124,806
| 659
| 91
|
Apache-2.0
| 2023-03-10T10:05:28
| 2020-12-03T08:26:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
__init__.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2019-2021 Megvii Inc. All rights reserved.
from .build import build_evaluator
from .citypersons_evaluation import CityPersonsEvaluator
from .cityscapes_evaluation import CityscapesEvaluator
from .classification_evaluation import ClassificationEvaluator
from .coco_evaluation import COCOEvaluator
from .crowdhuman_evaluation import CrowdHumanEvaluator
from .evaluator import (
DatasetEvaluator,
DatasetEvaluators,
inference_context,
inference_on_dataset,
inference_on_files
)
from .longtail_classification_evaluation import LongTailClassificationEvaluator
from .lvis_evaluation import LVISEvaluator
from .panoptic_evaluation import COCOPanopticEvaluator
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
from .registry import EVALUATOR
from .rotated_coco_evaluation import RotatedCOCOEvaluator
from .sem_seg_evaluation import SemSegEvaluator
from .testing import print_csv_format, verify_results
from .widerface_evaluation import WiderFaceEvaluator
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
559d24aae42b3e0023d06104c82fe2793adfc13d
|
0010b3d8b8f806d6065e1bb1aa3c18f9714001a7
|
/tests/test_config_output.py
|
e693aee9cd1897e47e27a75c2d719ba784fc4a66
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
GalSim-developers/GalSim
|
bfd2d5e57f20874ad81bc735195c5c62efad63eb
|
f1c0319600cc713373f1cea7459171fbf388848e
|
refs/heads/main
| 2023-08-17T07:30:44.583679
| 2023-08-15T02:52:00
| 2023-08-15T02:52:00
| 3,510,804
| 194
| 104
|
NOASSERTION
| 2023-09-12T04:03:38
| 2012-02-22T02:51:45
|
Python
|
UTF-8
|
Python
| false
| false
| 73,383
|
py
|
test_config_output.py
|
# Copyright (c) 2012-2022 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import numpy as np
import os
import shutil
import sys
import logging
import math
import yaml
import json
import re
import glob
import platform
from collections import OrderedDict
from unittest import mock
import galsim
from galsim_test_helpers import *
@timer
def test_fits():
"""Test the default output type = Fits
"""
# Most of the tests in this file write to the 'output' directory. Here we write to a different
# directory and make sure that it properly creates the directory if necessary.
if os.path.exists('output_fits'):
shutil.rmtree('output_fits')
config = {
'image' : {
'type' : 'Single',
'random_seed' : 1234,
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'flux' : 100,
},
'output' : {
'type' : 'Fits',
'nfiles' : 6,
'file_name' : "$'output_fits/test_fits_%d.fits'%file_num",
},
}
logger = logging.getLogger('test_fits')
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
config1 = galsim.config.CopyConfig(config)
im1_list = []
nfiles = 6
first_seed = galsim.BaseDeviate(1234).raw()
for k in range(nfiles):
ud = galsim.UniformDeviate(first_seed + k + 1)
sigma = ud() + 1.
gal = galsim.Gaussian(sigma=sigma, flux=100)
im1 = gal.drawImage(scale=1)
im1_list.append(im1)
galsim.config.BuildFile(config, file_num=k, image_num=k, obj_num=k, logger=logger)
file_name = 'output_fits/test_fits_%d.fits'%k
im2 = galsim.fits.read(file_name)
np.testing.assert_array_equal(im2.array, im1.array)
# Build all files at once
config = galsim.config.CopyConfig(config1)
galsim.config.BuildFiles(nfiles, config)
for k in range(nfiles):
file_name = 'output_fits/test_fits_%d.fits'%k
im2 = galsim.fits.read(file_name)
np.testing.assert_array_equal(im2.array, im1_list[k].array)
# Can also use Process to do this
config = galsim.config.Process(config1)
for k in range(nfiles):
file_name = 'output_fits/test_fits_%d.fits'%k
im2 = galsim.fits.read(file_name)
np.testing.assert_array_equal(im2.array, im1_list[k].array)
# The returned config is modified relative to the original.
assert config['image']['type'] == 'Single' # It has the items from the input.
assert config1['image']['type'] == 'Single'
assert config['image']['random_seed']['type'] == 'Sequence' # Some things are modified.
assert config1['image']['random_seed'] == 1234
assert isinstance(config['rng'], galsim.BaseDeviate) # And some new things
assert 'rng' not in config1
# For the first file, you don't need the file_num.
os.remove('output_fits/test_fits_0.fits')
config = galsim.config.CopyConfig(config1)
galsim.config.BuildFile(config)
im2 = galsim.fits.read('output_fits/test_fits_0.fits')
np.testing.assert_array_equal(im2.array, im1_list[0].array)
# nproc < 0 should automatically determine nproc from ncpu
config = galsim.config.CopyConfig(config1)
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger, new_params={'output.nproc' : -1})
assert 'ncpu = ' in cl.output
# nproc > njobs should drop back to nproc = njobs
config = galsim.config.CopyConfig(config1)
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger, new_params={'output.nproc' : 10})
if galsim.config.UpdateNProc(10, 6, config) > 1:
assert 'There are only 6 jobs to do. Reducing nproc to 6' in cl.output
# There is a feature that we reduce the number of tasks to be < 32767 to avoid problems
# with the multiprocessing.Queue overflowing. That 32767 number is a settable parameter,
# mostly so we can test this without requiring a crazy huge simulation run.
# So set it to 4 here to test it.
galsim.config.util.max_queue_size = 4
config = galsim.config.CopyConfig(config1)
config['output']['nproc'] = 2
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger, new_params={'output.nproc' : 2})
print(cl.output)
if galsim.config.UpdateNProc(10, 6, config) > 1:
assert 'len(tasks) = 6 is more than max_queue_size = 4' in cl.output
for k in range(nfiles):
file_name = 'output_fits/test_fits_%d.fits'%k
im2 = galsim.fits.read(file_name)
np.testing.assert_array_equal(im2.array, im1_list[k].array)
galsim.config.util.max_queue_size = 32767 # Set it back.
# Check that profile outputs something appropriate for multithreading.
# (The single-thread profiling is handled by the galsim executable, which we don't
# bother testing here.)
config = galsim.config.CopyConfig(config1)
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger,
new_params={'profile':True, 'output.nproc': -1})
#print(cl.output)
# Unfortunately, the LoggerProxy doesn't really work right with the string logger used
# by CaptureLog. I tried for a while to figure out how to get it to capture the proxied
# logs and couldn't get it working. So this just checks for an info log before the
# multithreading starts. But with a regular logger, there really is profiling output.
if galsim.config.UpdateNProc(10, 6, config) > 1:
assert "Starting separate profiling for each of the" in cl.output
# Check some public API utility functions
assert galsim.config.GetNFiles(config) == 6
assert galsim.config.GetNImagesForFile(config, 0) == 1
assert galsim.config.GetNObjForFile(config, 0, 0) == [1]
# Check invalid output type
config['output']['type'] = 'invalid'
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildFile(config)
with assert_raises(galsim.GalSimConfigError):
galsim.config.Process(config)
with assert_raises(galsim.GalSimConfigError):
galsim.config.GetNImagesForFile(config, 0)
with assert_raises(galsim.GalSimConfigError):
galsim.config.GetNObjForFile(config, 0, 0)
# Invalid output file
config = galsim.config.CopyConfig(config1)
config['output']['file_name'] = "$'output_fits/test_fits_%d.fits/test_fits.fits'%file_num"
with assert_raises(OSError):
galsim.config.BuildFile(config)
# If there is no output field, it raises an error when trying to do BuildFile.
os.remove('output_fits/test_fits_0.fits')
config = galsim.config.CopyConfig(config1)
del config['output']
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildFile(config)
# However, when run from a real config file, the processing will write a 'root' field,
# which it will use for the default behavior to write to root.fits.
config['root'] = 'output_fits/test_fits_0'
galsim.config.BuildFile(config)
im2 = galsim.fits.read('output_fits/test_fits_0.fits')
np.testing.assert_array_equal(im2.array, im1_list[0].array)
# Check invalid input field
config['input'] = { 'invalid' : {} }
with assert_raises(galsim.GalSimConfigError):
galsim.config.ProcessInput(config)
# Not sure if this is possible, but we have a check in case cpu_count fails, so
# mock this up to make sure we handle it properly (by reverting to nproc = 1).
with mock.patch('multiprocessing.cpu_count', side_effect=RuntimeError()):
config = galsim.config.CopyConfig(config1)
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger, new_params={'output.nproc' : -1})
assert 'Using single process' in cl.output
@timer
def test_multifits():
"""Test the output type = MultiFits
"""
config = {
'image' : {
'type' : 'Single',
'random_seed' : 1234,
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'flux' : 100,
},
'output' : {
'type' : 'MultiFits',
'nimages' : 6,
'file_name' : 'output/test_multifits.fits'
},
}
im1_list = []
nimages = 6
first_seed = galsim.BaseDeviate(1234).raw()
for k in range(nimages):
ud = galsim.UniformDeviate(first_seed + k + 1)
sigma = ud() + 1.
gal = galsim.Gaussian(sigma=sigma, flux=100)
im1 = gal.drawImage(scale=1)
im1_list.append(im1)
print('multifit image shapes = ',[im.array.shape for im in im1_list])
assert galsim.config.GetNFiles(config) == 1
assert galsim.config.GetNImagesForFile(config, 0) == 6
assert galsim.config.GetNObjForFile(config, 0, 0) == [1, 1, 1, 1, 1, 1]
galsim.config.Process(config)
im2_list = galsim.fits.readMulti('output/test_multifits.fits')
for k in range(nimages):
np.testing.assert_array_equal(im2_list[k].array, im1_list[k].array)
# nimages = 1 is allowed
config['output']['nimages'] = 1
galsim.config.Process(config)
im3_list = galsim.fits.readMulti('output/test_multifits.fits')
assert len(im3_list) == 1
np.testing.assert_array_equal(im3_list[0].array, im1_list[0].array)
# Check error message for missing nimages
del config['output']['nimages']
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildFile(config)
# Also if there is an input field that doesn't have nobj capability
config['input'] = { 'dict' : { 'dir' : 'config_input', 'file_name' : 'dict.p' } }
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildFile(config)
# However, an input field that does have nobj will return something for nobjects.
# This catalog has 3 rows, so equivalent to nobjects = 3
config = galsim.config.CleanConfig(config)
config['input'] = { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' } }
galsim.config.BuildFile(config)
im4_list = galsim.fits.readMulti('output/test_multifits.fits')
assert len(im4_list) == 3
for k in range(3):
np.testing.assert_array_equal(im4_list[k].array, im1_list[k].array)
@timer
def test_datacube():
"""Test the output type = DataCube
"""
config = {
'image' : {
'type' : 'Single',
'random_seed' : 1234,
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'flux' : 100,
},
'output' : {
'type' : 'DataCube',
'nimages' : 6,
'file_name' : 'output/test_datacube.fits'
},
}
im1_list = []
nimages = 6
b = None
first_seed = galsim.BaseDeviate(1234).raw()
for k in range(nimages):
ud = galsim.UniformDeviate(first_seed + k + 1)
sigma = ud() + 1.
gal = galsim.Gaussian(sigma=sigma, flux=100)
if b is None:
im1 = gal.drawImage(scale=1)
b = im1.bounds
else:
im1 = gal.drawImage(bounds=b, scale=1)
im1_list.append(im1)
print('datacube image shapes = ',[im.array.shape for im in im1_list])
assert galsim.config.GetNFiles(config) == 1
assert galsim.config.GetNImagesForFile(config, 0) == 6
assert galsim.config.GetNObjForFile(config, 0, 0) == [1, 1, 1, 1, 1, 1]
galsim.config.Process(config)
im2_list = galsim.fits.readCube('output/test_datacube.fits')
for k in range(nimages):
np.testing.assert_array_equal(im2_list[k].array, im1_list[k].array)
# nimages = 1 is allowed
config['output']['nimages'] = 1
galsim.config.Process(config)
im3_list = galsim.fits.readCube('output/test_datacube.fits')
assert len(im3_list) == 1
np.testing.assert_array_equal(im3_list[0].array, im1_list[0].array)
# Check error message for missing nimages
del config['output']['nimages']
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildFile(config)
# Also if there is an input field that doesn't have nobj capability
config['input'] = { 'dict' : { 'dir' : 'config_input', 'file_name' : 'dict.p' } }
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildFile(config)
# However, an input field that does have nobj will return something for nobjects.
# This catalog has 3 rows, so equivalent to nobjects = 3
config = galsim.config.CleanConfig(config)
config['input'] = { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' } }
galsim.config.BuildFile(config)
im4_list = galsim.fits.readCube('output/test_datacube.fits')
assert len(im4_list) == 3
for k in range(3):
np.testing.assert_array_equal(im4_list[k].array, im1_list[k].array)
# DataCubes cannot include weight (or any other) extra outputs as additional hdus.
# It should raise an exception if you try.
config['output']['weight'] = { 'hdu' : 1 }
config['output']['badpix'] = { 'file_name' : 'output/test_datacube_bp.fits' }
config['image']['noise'] = { 'type' : 'Gaussian', 'variance' : 0.1 }
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildFile(config)
# But if both weight and badpix are files, then it should work.
config['output']['weight'] = { 'file_name' : 'output/test_datacube_wt.fits' }
galsim.config.BuildFile(config)
im5_list = galsim.fits.readCube('output/test_datacube.fits')
assert len(im5_list) == 3
for k in range(3):
rng = galsim.UniformDeviate(first_seed + k + 1)
rng.discard(1)
im1_list[k].addNoise(galsim.GaussianNoise(sigma=0.1**0.5, rng=rng))
np.testing.assert_array_equal(im5_list[k].array, im1_list[k].array)
im5_wt = galsim.fits.read('output/test_datacube_wt.fits')
im5_bp = galsim.fits.read('output/test_datacube_bp.fits')
np.testing.assert_array_equal(im5_wt.array, 10)
np.testing.assert_array_equal(im5_bp.array, 0)
@timer
def test_skip():
"""Test the skip and noclobber options
"""
config = {
'image' : {
'type' : 'Single',
'random_seed' : 1234,
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'flux' : 100,
},
'output' : {
'nfiles' : 6,
'file_name' : "$'output/test_skip_%d.fits'%file_num",
'skip' : { 'type' : 'Random', 'p' : 0.4 }
},
}
im1_list = []
skip_list = []
nfiles = 6
first_seed = galsim.BaseDeviate(1234).raw()
for k in range(nfiles):
file_name = 'output/test_skip_%d.fits'%k
if os.path.exists(file_name):
os.remove(file_name)
ud_file = galsim.UniformDeviate(first_seed + k)
if ud_file() < 0.4:
print('skip k = ',k)
skip_list.append(True)
else:
skip_list.append(False)
ud = galsim.UniformDeviate(first_seed + k + 1)
sigma = ud() + 1.
gal = galsim.Gaussian(sigma=sigma, flux=100)
im1 = gal.drawImage(scale=1)
im1_list.append(im1)
galsim.config.Process(config)
for k in range(nfiles):
file_name = 'output/test_skip_%d.fits'%k
if skip_list[k]:
assert not os.path.exists(file_name)
else:
im2 = galsim.fits.read(file_name)
np.testing.assert_array_equal(im2.array, im1_list[k].array)
# Build the ones we skipped using noclobber option
del config['output']['skip']
config['output']['noclobber'] = True
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger)
assert "Skipping file 1 = output/test_skip_1.fits because output.noclobber" in cl.output
assert "Skipping file 3 = output/test_skip_3.fits because output.noclobber" in cl.output
assert "Skipping file 5 = output/test_skip_5.fits because output.noclobber" in cl.output
for k in range(nfiles):
file_name = 'output/test_skip_%d.fits'%k
im2 = galsim.fits.read(file_name)
np.testing.assert_array_equal(im2.array, im1_list[k].array)
# Another way to skip files is to split the work into several jobs
config['output']['noclobber'] = False
for k in range(nfiles):
file_name = 'output/test_skip_%d.fits'%k
if os.path.exists(file_name): os.remove(file_name)
galsim.config.Process(config, njobs=3, job=3)
for k in range(nfiles):
file_name = 'output/test_skip_%d.fits'%k
if k <= 3:
assert not os.path.exists(file_name)
else:
im2 = galsim.fits.read(file_name)
np.testing.assert_array_equal(im2.array, im1_list[k].array)
with CaptureLog() as cl:
galsim.config.Process(config, njobs=3, job=3, logger=cl.logger)
assert "Splitting work into 3 jobs. Doing job 3" in cl.output
assert "Building 2 out of 6 total files: file_num = 4 .. 5" in cl.output
# job < 1 or job > njobs is invalid
with assert_raises(galsim.GalSimValueError):
galsim.config.Process(config, njobs=3, job=0)
with assert_raises(galsim.GalSimValueError):
galsim.config.Process(config, njobs=3, job=4)
# Also njobs < 1 is invalid
with assert_raises(galsim.GalSimValueError):
galsim.config.Process(config, njobs=0)
@timer
def test_extra_wt():
"""Test the extra weight and badpix fields
"""
nfiles = 6
config = {
'image' : {
'type' : 'Single',
'random_seed' : 1234,
'pixel_scale' : 0.4,
'noise' : { 'type' : 'Poisson', 'sky_level_pixel' : '$0.7 + image_num' }
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'flux' : 100,
},
'output' : {
'nfiles' : nfiles,
'file_name' : "$'output/test_main_%d.fits'%file_num",
'weight' : { 'file_name' : "$'output/test_wt_%d.fits'%file_num" },
'badpix' : { 'file_name' : "$'output/test_bp_%d.fits'%file_num" },
},
}
galsim.config.Process(config)
main_im = [ galsim.fits.read('output/test_main_%d.fits'%k) for k in range(nfiles) ]
for k in range(nfiles):
im_wt = galsim.fits.read('output/test_wt_%d.fits'%k)
np.testing.assert_almost_equal(im_wt.array, 1./(0.7 + k))
im_bp = galsim.fits.read('output/test_bp_%d.fits'%k)
np.testing.assert_array_equal(im_bp.array, 0)
os.remove('output/test_main_%d.fits'%k)
# If noclobber = True, don't overwrite existing file.
config['noise'] = { 'type' : 'Poisson', 'sky_level_pixel' : 500 }
config['output']['noclobber'] = True
galsim.config.RemoveCurrent(config)
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger)
assert 'Not writing weight file 0 = output/test_wt_0.fits' in cl.output
for k in range(nfiles):
im = galsim.fits.read('output/test_main_%d.fits'%k)
np.testing.assert_equal(im.array, main_im[k].array)
im_wt = galsim.fits.read('output/test_wt_%d.fits'%k)
np.testing.assert_almost_equal(im_wt.array, 1./(0.7 + k))
# Can also add these as extra hdus rather than separate files.
config['output']['noclobber'] = False
config['output']['weight'] = { 'hdu' : 1 }
config['output']['badpix'] = { 'hdu' : 2 }
galsim.config.RemoveCurrent(config)
galsim.config.Process(config)
for k in range(nfiles):
im_wt = galsim.fits.read('output/test_main_%d.fits'%k, hdu=1)
np.testing.assert_almost_equal(im_wt.array, 1./(0.7 + k))
im_bp = galsim.fits.read('output/test_main_%d.fits'%k, hdu=2)
np.testing.assert_array_equal(im_bp.array, 0)
config['output']['badpix'] = { 'hdu' : 0 }
galsim.config.RemoveCurrent(config)
with assert_raises(galsim.GalSimConfigError):
galsim.config.Process(config, except_abort=True)
config['output']['badpix'] = { 'hdu' : 1 }
with assert_raises(galsim.GalSimConfigError):
galsim.config.Process(config, except_abort=True)
config['output']['badpix'] = { 'hdu' : 3 }
with assert_raises(galsim.GalSimConfigError):
galsim.config.Process(config, except_abort=True)
# If include_obj_var = True, then weight image includes signal.
config['output']['weight']['include_obj_var'] = True
config['output']['badpix'] = { 'hdu' : 2 }
config['output']['nproc'] = 2
galsim.config.RemoveCurrent(config)
galsim.config.Process(config)
first_seed = galsim.BaseDeviate(1234).raw()
for k in range(nfiles):
ud = galsim.UniformDeviate(first_seed + k + 1)
sigma = ud() + 1.
gal = galsim.Gaussian(sigma=sigma, flux=100)
im = gal.drawImage(scale=0.4)
im_wt = galsim.fits.read('output/test_main_%d.fits'%k, hdu=1)
np.testing.assert_almost_equal(im_wt.array, 1./(0.7 + k + im.array))
# It is permissible for weight, badpix to have no output. Some use cases require building
# the weight and/or badpix information even if it is not associated with any output.
config['output']['weight'] = {}
config['output']['badpix'] = {}
galsim.config.RemoveCurrent(config)
galsim.config.Process(config)
for k in range(nfiles):
assert_raises(OSError, galsim.fits.read, 'output/test_main_%d.fits'%k, hdu=1)
os.remove('output/test_wt_%d.fits'%k)
os.remove('output/test_main_%d.fits'%k)
# Can also have both outputs
config['output']['weight'] = { 'file_name': "$'output/test_wt_%d.fits'%file_num", 'hdu': 1 }
galsim.config.RemoveCurrent(config)
galsim.config.Process(config, except_abort=True)
for k in range(nfiles):
im_wt1 = galsim.fits.read('output/test_wt_%d.fits'%k)
np.testing.assert_almost_equal(im_wt1.array, 1./(0.7 + k))
im_wt2 = galsim.fits.read('output/test_main_%d.fits'%k, hdu=1)
np.testing.assert_almost_equal(im_wt2.array, 1./(0.7 + k))
# Other such use cases would access the final weight or badpix image using GetFinalExtraOutput
galsim.config.BuildFile(config)
wt = galsim.config.extra.GetFinalExtraOutput('weight', config)
np.testing.assert_almost_equal(wt[0].array, 1./0.7)
# If the image is a Scattered type, then the weight and badpix images are built by a
# different code path.
config = {
'image' : {
'type' : 'Scattered',
'random_seed' : 1234,
'pixel_scale' : 0.4,
'size' : 64,
'noise' : { 'type' : 'Poisson', 'sky_level_pixel' : '$0.7 + image_num' },
'nobjects' : 1,
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'flux' : 100,
},
'output' : {
'nfiles' : nfiles,
'file_name' : "$'output/test_main_%d.fits'%file_num",
'weight' : { 'file_name' : "$'output/test_wt_%d.fits'%file_num" },
'badpix' : { 'file_name' : "$'output/test_bp_%d.fits'%file_num" },
},
}
galsim.config.Process(config)
for k in range(nfiles):
im_wt = galsim.fits.read('output/test_wt_%d.fits'%k)
np.testing.assert_almost_equal(im_wt.array, 1./(0.7 + k))
im_bp = galsim.fits.read('output/test_bp_%d.fits'%k)
np.testing.assert_array_equal(im_bp.array, 0)
# If include_obj_var = True, then weight image includes signal.
config['output']['weight']['include_obj_var'] = True
config['output']['nproc'] = 2
galsim.config.RemoveCurrent(config)
galsim.config.Process(config)
first_seed = galsim.BaseDeviate(1234).raw()
for k in range(nfiles):
ud = galsim.UniformDeviate(first_seed + k + 1)
x = ud() * 63 + 1
y = ud() * 63 + 1
ix = int(math.floor(x+1))
iy = int(math.floor(y+1))
dx = x-ix+0.5
dy = y-iy+0.5
sigma = ud() + 1.
gal = galsim.Gaussian(sigma=sigma, flux=100)
im = galsim.ImageF(64,64)
stamp = gal.drawImage(scale=0.4, offset=(dx,dy))
stamp.setCenter(ix,iy)
b = im.bounds & stamp.bounds
im[b] = stamp[b]
im_wt = galsim.fits.read('output/test_wt_%d.fits'%k)
np.testing.assert_almost_equal(im_wt.array, 1./(0.7 + k + im.array))
# If both output.nproc and image.nproc are given, only output.nproc is used
config['image']['nproc' ] = -1
config['image']['nobjects'] = 5
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger)
#print(cl.output)
#assert 'Already multiprocessing. Ignoring image.nproc' in cl.output
# Note: This doesn't show up because cl.logger doesn't get through the multiprocessing,
# but it does ignore image.nproc > 1.
# Do it manually to confirm.
config['current_nproc'] = 2
with CaptureLog() as cl:
nproc = galsim.config.UpdateNProc(2, 5, config, logger=cl.logger)
assert 'Already multiprocessing. Ignoring image.nproc' in cl.output
assert nproc == 1
@timer
def test_extra_psf():
"""Test the extra psf field
"""
nfiles = 6
config = {
'image' : {
'type' : 'Scattered',
'random_seed' : 1234,
'nobjects' : 1,
'pixel_scale' : 0.4,
'size' : 64,
'stamp_size' : 25,
'image_pos' : { 'type' : 'XY', # Some of these are intentionally off the image.
'x' : { 'type': 'Random', 'min': -30, 'max': 100 },
'y' : { 'type': 'Random', 'min': -30, 'max': 100 } },
'offset' : { 'type' : 'XY',
'x' : { 'type': 'Random', 'min': -0.5, 'max': 0.5 },
'y' : { 'type': 'Random', 'min': -0.5, 'max': 0.5 } },
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'shift' : { 'type' : 'XY',
'x' : { 'type': 'Random', 'min': -1, 'max': 1 },
'y' : { 'type': 'Random', 'min': -1, 'max': 1 } },
'flux' : 100,
},
'psf' : {
'type' : 'Moffat',
'beta' : 3.5,
'fwhm' : { 'type': 'Random', 'min': 0.5, 'max': 0.9 },
},
'output' : {
'nfiles' : nfiles,
'file_name' : "$'output/test_gal_%d.fits'%file_num",
'psf' : { 'file_name' : "$'output/test_psf_%d.fits'%file_num", }
},
}
for f in glob.glob('output/test_psf_*.fits'): os.remove(f)
for f in glob.glob('output/test_gal_*.fits'): os.remove(f)
galsim.config.Process(config)
gal_center = []
gal_dxy = []
gal_shift = []
gal_offset = []
psf_fwhm = []
first_seed = galsim.BaseDeviate(1234).raw()
for k in range(nfiles):
ud = galsim.UniformDeviate(first_seed + k + 1)
x = ud() * 130 - 30
y = ud() * 130 - 30
ix = int(math.floor(x+0.5))
iy = int(math.floor(y+0.5))
dx = x-ix
dy = y-iy
fwhm = ud() * 0.4 + 0.5
psf = galsim.Moffat(beta=3.5, fwhm=fwhm)
sigma = ud() + 1.
gal = galsim.Gaussian(sigma=sigma, flux=100)
shift_x = ud() * 2. - 1.
shift_y = ud() * 2. - 1.
gal = gal.shift(shift_x, shift_y)
offset_x = ud() - 0.5
offset_y = ud() - 0.5
# Store values for later loops
gal_center.append( (ix,iy) )
gal_dxy.append( (dx,dy) )
gal_shift.append( (shift_x, shift_y) )
gal_offset.append( (offset_x, offset_y) )
psf_fwhm.append(fwhm)
final = galsim.Convolve(gal, psf)
im = galsim.ImageF(64,64)
stamp = final.drawImage(scale=0.4, nx=25, ny=25, offset=(offset_x+dx,offset_y+dy))
stamp.setCenter(ix,iy)
b = im.bounds & stamp.bounds
if b.isDefined():
im[b] = stamp[b]
im2 = galsim.fits.read('output/test_gal_%d.fits'%k)
np.testing.assert_almost_equal(im2.array, im.array)
# Default is for the PSF to be centered at (x,y). No shift, no offset. (But still dx,dy)
im.setZero()
stamp = psf.drawImage(scale=0.4, nx=25, ny=25, offset=(dx,dy))
stamp.setCenter(ix,iy)
if b.isDefined():
im[b] = stamp[b]
im2 = galsim.fits.read('output/test_psf_%d.fits'%k)
np.testing.assert_almost_equal(im2.array, im.array)
# Now have the psf shift and offset match the galaxy
config['output']['psf']['shift'] = 'galaxy'
config['output']['psf']['offset'] = 'galaxy'
galsim.config.RemoveCurrent(config)
galsim.config.Process(config)
for k in range(nfiles):
ix, iy = gal_center[k]
dx, dy = gal_dxy[k]
sx, sy = gal_shift[k]
ox, oy = gal_offset[k]
psf = galsim.Moffat(beta=3.5, fwhm=psf_fwhm[k])
psf = psf.shift(sx,sy)
stamp = psf.drawImage(scale=0.4, nx=25, ny=25, offset=(ox+dx,oy+dy))
stamp.setCenter(ix,iy)
im = galsim.ImageF(64,64)
b = im.bounds & stamp.bounds
if b.isDefined():
im[b] = stamp[b]
im2 = galsim.fits.read('output/test_psf_%d.fits'%k)
np.testing.assert_almost_equal(im2.array, im.array)
# Can also define custom shift and/or offset for the psf separately from the galaxy.
config['output']['psf']['shift'] = {
'type' : 'XY',
'x' : { 'type': 'Random', 'min': -1, 'max': 1 },
'y' : { 'type': 'Random', 'min': -1, 'max': 1 }
}
config['output']['psf']['offset'] = {
'type' : 'XY',
'x' : { 'type': 'Random', 'min': -0.5, 'max': 0.5 },
'y' : { 'type': 'Random', 'min': -0.5, 'max': 0.5 }
}
# Also, let's test the ability of the extra fields to be in a different directory.
if os.path.exists('output_psf'):
shutil.rmtree('output_psf')
config['output']['psf']['dir'] = 'output_psf'
config['output']['psf']['file_name'] = "$'test_psf_%d.fits'%file_num"
galsim.config.RemoveCurrent(config)
galsim.config.Process(config)
for k in range(nfiles):
ud = galsim.UniformDeviate(first_seed + k + 1)
ud.discard(8) # The ud() calls for the galaxy precede the extra_output calls.
ix, iy = gal_center[k]
dx, dy = gal_dxy[k]
psf = galsim.Moffat(beta=3.5, fwhm=psf_fwhm[k])
shift_x = ud() * 2. - 1.
shift_y = ud() * 2. - 1.
psf = psf.shift(shift_x, shift_y)
offset_x = ud() - 0.5
offset_y = ud() - 0.5
stamp = psf.drawImage(scale=0.4, nx=25, ny=25, offset=(offset_x+dx,offset_y+dy))
stamp.setCenter(ix,iy)
im = galsim.ImageF(64,64)
b = im.bounds & stamp.bounds
if b.isDefined():
im[b] = stamp[b]
im2 = galsim.fits.read('output_psf/test_psf_%d.fits'%k)
np.testing.assert_almost_equal(im2.array, im.array)
# Finally, another mode that is allowed is to only write a single PSF file to correspond to
# multiple image files
config = {
'image' : {
'type' : 'Single',
'random_seed' : 1234,
'size' : 32,
'pixel_scale' : 0.4,
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'flux' : 100,
},
'psf' : {
'type' : 'Moffat',
'beta' : 3.5,
'fwhm' : 0.9,
},
'output' : {
'nfiles' : nfiles,
'file_name' : "$'output/test_gal_%d.fits'%file_num",
'psf' : { 'file_name' : 'output_psf/test_psf.fits' }
},
}
galsim.config.Process(config)
psf = galsim.Moffat(beta=3.5, fwhm=0.9)
im = psf.drawImage(scale=0.4, nx=32, ny=32)
im2 = galsim.fits.read('output_psf/test_psf.fits')
np.testing.assert_almost_equal(im2.array, im.array)
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger)
assert "Not writing psf file 1 = output_psf/test_psf.fits because already written" in cl.output
assert "Not writing psf file 2 = output_psf/test_psf.fits because already written" in cl.output
assert "Not writing psf file 3 = output_psf/test_psf.fits because already written" in cl.output
assert "Not writing psf file 4 = output_psf/test_psf.fits because already written" in cl.output
assert "Not writing psf file 5 = output_psf/test_psf.fits because already written" in cl.output
@timer
def test_extra_psf_sn():
"""Test the signal_to_noise option of the extra psf field
"""
config = {
'image' : {
'random_seed' : 1234,
'pixel_scale' : 0.4,
'size' : 64,
'dtype': 'float',
},
'gal' : {
'type' : 'Gaussian',
'sigma' : 2.3,
'flux' : 100,
},
'psf' : {
'type' : 'Moffat',
'beta' : 3.5,
'fwhm' : 0.7,
'gsparams' : { 'maxk_threshold': 3.e-4 }
},
'output' : {
'psf' : {}
},
}
# First pure psf image with no noise.
gal_image = galsim.config.BuildImage(config)
pure_psf_image = galsim.config.extra.GetFinalExtraOutput('psf', config)[0]
assert gal_image.dtype is np.float64
assert pure_psf_image.dtype is np.float64 # PSF gets dtype from main image
np.testing.assert_almost_equal(pure_psf_image.array.sum(), 1., decimal=6)
# Draw PSF at S/N = 100
# (But first check that an error is raised if noise is missing.)
config['output']['psf']['signal_to_noise'] = 100
galsim.config.RemoveCurrent(config)
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildImage(config)
noise_var = 20.
config['image']['noise'] = { 'type' : 'Gaussian', 'variance' : noise_var, }
gal_image = galsim.config.BuildImage(config)
sn100_psf_image = galsim.config.extra.GetFinalExtraOutput('psf', config)[0]
sn100_flux = sn100_psf_image.array.sum()
psf_noise = sn100_psf_image - sn100_flux * pure_psf_image
print('psf_noise.var = ',psf_noise.array.var(), noise_var)
np.testing.assert_allclose(psf_noise.array.var(), noise_var, rtol=0.02)
snr = np.sqrt( np.sum(sn100_psf_image.array**2, dtype=float) / noise_var )
print('snr = ',snr, 100)
np.testing.assert_allclose(snr, 100, rtol=0.25) # Not super accurate for any single image.
# Can also specify different draw_methods.
config['output']['psf']['draw_method'] = 'real_space'
galsim.config.RemoveCurrent(config)
gal_image = galsim.config.BuildImage(config)
real_psf_image = galsim.config.extra.GetFinalExtraOutput('psf', config)[0]
print('real flux = ', real_psf_image.array.sum(), sn100_flux)
np.testing.assert_allclose(real_psf_image.array.sum(), sn100_flux, rtol=1.e-4)
# phot is invalid with signal_to_noise
config['output']['psf']['draw_method'] = 'phot'
galsim.config.RemoveCurrent(config)
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildImage(config)
# Check for other invalid input
config['output']['psf']['draw_method'] = 'input'
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildImage(config)
config['output']['psf']['draw_method'] = 'auto'
config['output']['psf']['flux'] = sn100_flux
with assert_raises(galsim.GalSimConfigError):
galsim.config.BuildImage(config)
# OK to use phot with flux.
config['output']['psf']['draw_method'] = 'phot'
del config['output']['psf']['signal_to_noise']
gal_image = galsim.config.BuildImage(config)
phot_psf_image = galsim.config.extra.GetFinalExtraOutput('psf', config)[0]
print('phot flux = ', phot_psf_image.array.sum(), sn100_flux)
np.testing.assert_allclose(phot_psf_image.array.sum(), sn100_flux, rtol=1.e-4)
@timer
def test_extra_truth():
"""Test the extra truth field
"""
nobjects = 6
config = {
# Custom type in this dir. Lets us use HSM_Shape
'modules' : ['hsm_shape'],
'image' : {
'type' : 'Tiled',
'nx_tiles' : nobjects,
'ny_tiles' : 1,
'stamp_xsize' : 32,
'stamp_ysize' : 32,
'random_seed' : 1234,
'pixel_scale' : 0.2,
'nproc' : 2,
},
'psf' : {
'type': 'Gaussian',
'sigma': 0.5,
},
'gal' : {
'type' : 'List',
'items' : [
{
'type': 'Gaussian',
'sigma': 1.e-6,
# Notably, this has no ellip field.
# The workaround below for setting an effective ellip value in the truth
# catalog to deal with this used to not work.
},
{
'type': 'Gaussian',
'sigma': {
'type': 'Random_float',
'min': 1,
'max': 2,
},
'ellip': {'type': 'EBeta', 'e': 0.2, 'beta': {'type': 'Random'} },
},
],
'flux': { 'type': 'Random', 'min': '$obj_num+1', 'max': '$(obj_num+1) * 4' },
# 1/3 of objects are stars.
'index': '$0 if obj_num % 3 == 0 else 1',
},
'output' : {
'type' : 'Fits',
'file_name' : 'output/test_truth.fits',
'truth' : {
'hdu' : 1,
'columns' : OrderedDict([
('object_id' , 'obj_num'),
('index' , 'gal.index'),
('flux' , '@gal.flux'), # The @ is not required, but allowed.
# Check several different ways to do calculations
('sigma' , '$@gal.items.0.sigma if @gal.index==0 else @gal.items.1.sigma'),
('g' , {
'type': 'Eval',
'str': '0. if @gal.index==0 else (@gal.items.1.ellip).g',
}),
('beta' , '$0. if @gal.index==0 else (@gal.items.1.ellip).beta.rad'),
('hlr' , '$@output.truth.columns.sigma * np.sqrt(2.*math.log(2))'),
('fwhm' , '$(@gal).original.fwhm if @gal.index == 1 else (@gal).fwhm'),
('pos' , 'image_pos'),
# slightly gratuitous here. Use int16 to force a check that np.integer works.
('obj_type_i' , '$np.int16(@gal.index)'),
('obj_type_s' , '$"gal" if @gal.index else "star"'),
# Can also just be a constant value.
('run_num' , 17),
('shape' , { 'type' : 'HSM_Shape' }),
])
}
}
}
galsim.config.ImportModules(config)
galsim.config.Process(config)
sigma = np.empty(nobjects)
flux = np.empty(nobjects)
g = np.empty(nobjects)
beta = np.empty(nobjects)
meas_g1 = np.empty(nobjects)
meas_g2 = np.empty(nobjects)
obj_type_i = np.empty(nobjects, dtype=int)
obj_type_s = [None] * nobjects
first_seed = galsim.BaseDeviate(1234).raw()
for k in range(nobjects):
ud = galsim.UniformDeviate(first_seed + k + 1)
if k%3 == 0:
sigma[k] = 1.e-6
g[k] = 0.
beta[k] = 0.
obj_type_i[k] = 0
obj_type_s[k] = 'star'
gal = galsim.Gaussian(sigma=sigma[k])
else:
sigma[k] = ud() + 1
shear = galsim.Shear(e=0.2, beta=ud() * 2*np.pi * galsim.radians)
g[k] = shear.g
beta[k] = shear.beta.rad
obj_type_i[k] = 1
obj_type_s[k] = 'gal'
gal = galsim.Gaussian(sigma=sigma[k]).shear(shear)
flux[k] = (k+1) * (ud() * 3 + 1)
gal = gal.withFlux(flux[k])
psf = galsim.Gaussian(sigma=0.5)
obj = galsim.Convolve(psf,gal)
meas_shape = obj.drawImage(nx=32,ny=32,scale=0.2).FindAdaptiveMom().observed_shape
meas_g1[k] = meas_shape.g1
meas_g2[k] = meas_shape.g2
file_name = 'output/test_truth.fits'
cat = galsim.Catalog(file_name, hdu=1)
obj_num = np.array(range(nobjects))
np.testing.assert_almost_equal(cat.data['object_id'], obj_num)
np.testing.assert_equal(cat.data['index'], obj_type_i)
np.testing.assert_almost_equal(cat.data['flux'], flux)
np.testing.assert_almost_equal(cat.data['sigma'], sigma)
np.testing.assert_almost_equal(cat.data['g'], g)
np.testing.assert_almost_equal(cat.data['beta'], beta)
np.testing.assert_equal(cat.data['obj_type_i'], obj_type_i)
np.testing.assert_equal(cat.data['obj_type_s'], obj_type_s)
np.testing.assert_almost_equal(cat.data['hlr'], sigma * galsim.Gaussian._hlr_factor)
np.testing.assert_almost_equal(cat.data['fwhm'], sigma * galsim.Gaussian._fwhm_factor)
np.testing.assert_almost_equal(cat.data['pos.x'], obj_num * 32 + 16.5)
np.testing.assert_almost_equal(cat.data['pos.y'], 16.5)
np.testing.assert_almost_equal(cat.data['run_num'], 17)
np.testing.assert_almost_equal(cat.data['shape.g1'], meas_g1)
np.testing.assert_almost_equal(cat.data['shape.g2'], meas_g2)
# If types are not consistent for all objects, raise an error.
# Here it's a float for stars and Angle for galaxies.
config['output']['truth']['columns']['beta'] = (
'$0. if @gal.index==0 else (@gal.items.1.ellip).beta')
del config['image']['nproc']
with CaptureLog(level=1) as cl:
with assert_raises(galsim.GalSimConfigError):
galsim.config.Process(config, logger=cl.logger)
assert "beta has type Angle, but previously had type float" in cl.output
config['output']['truth']['columns']['beta'] = (
'$0. if @gal.index==0 else (@gal.items.1.ellip).beta.rad')
# If we don't use Random_float, the truth catalog can't figure out the type of gal.sigma
# when it's used as @gal.items.1.sigma before being calculated.
# This gives an error, but also a suggestion for how it might be remedied.
config['gal']['items'][1]['sigma'] = {
'type': 'Random', 'min': 1, 'max': 2, 'rng_index_key': 'image_num' }
try:
galsim.config.Process(config)
# This is effectively doing assert_raises, but we want to check the error string.
assert False
except galsim.GalSimConfigError as e:
print(e)
assert 'Consider using an explicit value-typed type name like Random_float' in str(e)
@timer
def test_retry_io():
"""Test the retry_io option
"""
# Make a class that mimics writeMulti, except that it fails about 1/3 of the time.
class FlakyWriter:
def __init__(self, rng):
self.ud = galsim.UniformDeviate(rng)
def writeFile(self, *args, **kwargs):
p = self.ud()
if p < 0.33:
raise OSError("p = %f"%p)
else:
galsim.fits.writeMulti(*args, **kwargs)
# Now make a copy of Fits and ExtraWeight using this writer.
class FlakyFits(galsim.config.OutputBuilder):
def writeFile(self, data, file_name, config, base, logger):
flaky_writer = FlakyWriter(galsim.config.GetRNG(config,base))
flaky_writer.writeFile(data, file_name)
galsim.config.RegisterOutputType('FlakyFits', FlakyFits())
class FlakyWeight(galsim.config.extra_weight.WeightBuilder):
def writeFile(self, file_name, config, base, logger):
flaky_writer = FlakyWriter(galsim.config.GetRNG(config,base))
flaky_writer.writeFile(self.final_data, file_name)
galsim.config.RegisterExtraOutput('flaky_weight', FlakyWeight())
galsim.config.output._sleep_mult = 1.e-10 # Don't take forever testing this.
nfiles = 6
config = {
'image' : {
'type' : 'Single',
'random_seed' : 1234,
},
'gal' : {
'type' : 'Gaussian',
'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
'flux' : 100,
},
'output' : {
'type' : 'FlakyFits',
'nfiles' : nfiles,
'retry_io': 5,
'file_name' : "$'output/test_flaky_fits_%d.fits'%file_num",
'flaky_weight' : { 'file_name' : "$'output/test_flaky_wt_%d.fits'%file_num" },
},
}
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger)
#print(cl.output)
assert "File output/test_flaky_fits_0.fits: Caught OSError" in cl.output
assert "This is try 2/6, so sleep for 2 sec and try again." in cl.output
assert "file 0: Wrote FlakyFits to file 'output/test_flaky_fits_0.fits'" in cl.output
assert "File output/test_flaky_wt_4.fits: Caught OSError: " in cl.output
assert "This is try 1/6, so sleep for 1 sec and try again." in cl.output
assert "file 0: Wrote flaky_weight to 'output/test_flaky_wt_0.fits'" in cl.output
# Now the regular versions.
config2 = galsim.config.CopyConfig(config)
config2['output'] = {
'type' : 'Fits',
'nfiles' : nfiles,
'file_name' : "$'output/test_nonflaky_fits_%d.fits'%file_num",
'weight' : { 'file_name' : "$'output/test_nonflaky_wt_%d.fits'%file_num" },
}
galsim.config.Process(config2)
for k in range(nfiles):
im1 = galsim.fits.read('output/test_flaky_fits_%d.fits'%k)
im2 = galsim.fits.read('output/test_nonflaky_fits_%d.fits'%k)
np.testing.assert_array_equal(im1.array, im2.array)
wt1 = galsim.fits.read('output/test_flaky_wt_%d.fits'%k)
wt2 = galsim.fits.read('output/test_nonflaky_wt_%d.fits'%k)
np.testing.assert_array_equal(wt1.array, wt2.array)
# Without retry_io, it will fail, but keep going
del config['output']['retry_io']
galsim.config.RemoveCurrent(config)
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger)
#print(cl.output)
assert "Exception caught for file 0 = output/test_flaky_fits_0.fits" in cl.output
assert "File output/test_flaky_fits_0.fits not written! Continuing on..." in cl.output
assert "file 1: Wrote FlakyFits to file 'output/test_flaky_fits_1.fits'" in cl.output
assert "File 1 = output/test_flaky_fits_1.fits" in cl.output
assert "File 2 = output/test_flaky_fits_2.fits" in cl.output
assert "File 3 = output/test_flaky_fits_3.fits" in cl.output
assert "Exception caught for file 4 = output/test_flaky_fits_4.fits" in cl.output
assert "File output/test_flaky_fits_4.fits not written! Continuing on..." in cl.output
assert "File 5 = output/test_flaky_fits_5.fits" in cl.output
# Also works in nproc > 1 mode
config['output']['nproc'] = 2
galsim.config.RemoveCurrent(config)
with CaptureLog() as cl:
galsim.config.Process(config, logger=cl.logger)
#print(cl.output)
if galsim.config.UpdateNProc(2, nfiles, config) > 1:
assert re.search("Process-.: Exception caught for file 0 = output/test_flaky_fits_0.fits",
cl.output)
assert "File output/test_flaky_fits_0.fits not written! Continuing on..." in cl.output
assert re.search("Process-.: File 1 = output/test_flaky_fits_1.fits", cl.output)
assert re.search("Process-.: File 2 = output/test_flaky_fits_2.fits", cl.output)
assert re.search("Process-.: File 3 = output/test_flaky_fits_3.fits", cl.output)
assert re.search("Process-.: Exception caught for file 4 = output/test_flaky_fits_4.fits",
cl.output)
assert "File output/test_flaky_fits_4.fits not written! Continuing on..." in cl.output
assert re.search("Process-.: File 5 = output/test_flaky_fits_5.fits", cl.output)
# But with except_abort = True, it will stop after the first failure
del config['output']['nproc'] # Otherwise which file fails is non-deterministic.
with CaptureLog() as cl:
try:
galsim.config.Process(config, logger=cl.logger, except_abort=True)
except OSError as e:
assert str(e) == "p = 0.285159"
#print(cl.output)
assert "File output/test_flaky_fits_0.fits not written." in cl.output
@timer
def test_config():
"""Test that configuration files are read, copied, and merged correctly.
"""
config = {
'gal' : { 'type' : 'Gaussian', 'sigma' : 2.3,
'flux' : { 'type' : 'List', 'items' : [ 100, 500, 1000 ] } },
'psf' : { 'type' : 'Convolve',
'items' : [
{'type' : 'Moffat', 'beta' : 3.5, 'fwhm' : 0.9 },
{'type' : 'Airy', 'obscuration' : 0.3, 'lam' : 900, 'diam' : 4. } ] },
'image' : { 'type' : 'Single', 'random_seed' : 1234, },
'output' : { 'type' : 'Fits', 'file_name' : "test.fits", 'dir' : 'None' },
'input' : { 'dict' : { 'dir' : 'config_input', 'file_name' : 'dict.p' } },
'eval_variables' : { 'fpixel_scale' : 0.3 }
}
# Test yaml
yaml_file_name = "output/test_config.yaml"
with open(yaml_file_name, 'w') as fout:
yaml.dump(config, fout, default_flow_style=True)
    # The string 'None' will be converted to a real None. Convert it here in the comparison dict.
config['output']['dir'] = None
config1 = galsim.config.ReadConfig(yaml_file_name)[0]
assert config == dict(config1)
config2 = galsim.config.ReadConfig(yaml_file_name, file_type='yaml')[0]
assert config == dict(config2)
config3 = galsim.config.ReadYaml(yaml_file_name)[0]
    assert config == dict(config3)
# Test json
json_file_name = "output/test_config.json"
with open(json_file_name, 'w') as fout:
json.dump(config, fout)
config4 = galsim.config.ReadConfig(json_file_name)[0]
assert config == dict(config4)
config5 = galsim.config.ReadConfig(json_file_name, file_type='json')[0]
assert config == dict(config5)
config6 = galsim.config.ReadJson(json_file_name)[0]
assert config == dict(config6)
# Merging identical dicts, should do nothing
galsim.config.MergeConfig(config1,config2)
assert config == dict(config1)
with CaptureLog() as cl:
galsim.config.MergeConfig(config1,config2,logger=cl.logger)
assert "Not merging key type from the base config" in cl.output
assert "Not merging key items from the base config" in cl.output
# Merging different configs does something, with the first taking precedence on conflicts
del config5['gal']
del config6['psf']
config6['image']['random_seed'] = 1337
galsim.config.MergeConfig(config5, config6)
assert config == config5
# Copying deep copies and removes any existing input_manager
config4['_input_manager'] = 'an input manager'
config7 = galsim.config.CopyConfig(config4)
assert config == config7
# It also works on empty config dicts (gratuitous, but adds some test coverage)
config8 = {}
config9 = galsim.config.CopyConfig(config8)
assert config9 == config8
# Check ParseExtendedKey functionality
d,k = galsim.config.ParseExtendedKey(config,'gal.sigma')
assert d[k] == 2.3
d,k = galsim.config.ParseExtendedKey(config,'gal.flux.items.0')
assert d[k] == 100
d,k = galsim.config.ParseExtendedKey(config,'psf.items.1.diam')
assert d[k] == 4
# Check GetFromConfig functionality
v = galsim.config.GetFromConfig(config,'gal.sigma')
assert v == 2.3
v = galsim.config.GetFromConfig(config,'gal.flux.items.0')
assert v == 100
v = galsim.config.GetFromConfig(config,'psf.items.1.diam')
assert v == 4
# Check SetInConfig functionality
galsim.config.SetInConfig(config,'gal.sigma', 2.8)
assert galsim.config.GetFromConfig(config,'gal.sigma') == 2.8
galsim.config.SetInConfig(config,'gal.flux.items.0', 120)
assert galsim.config.GetFromConfig(config,'gal.flux.items.0') == 120
galsim.config.SetInConfig(config,'psf.items.1.diam', 8)
assert galsim.config.GetFromConfig(config,'psf.items.1.diam') == 8
assert_raises(ValueError, galsim.config.GetFromConfig, config, 'psf.items.lam')
assert_raises(ValueError, galsim.config.GetFromConfig, config, 'psf.items.4')
assert_raises(ValueError, galsim.config.GetFromConfig, config, 'psf.itms.1.lam')
assert_raises(ValueError, galsim.config.SetInConfig, config, 'psf.items.lam', 700)
assert_raises(ValueError, galsim.config.SetInConfig, config, 'psf.items.4', 700)
assert_raises(ValueError, galsim.config.SetInConfig, config, 'psf.itms.1.lam', 700)
# Check the yaml multiple document option.
# Easiest to just read demo6 with both Yaml and Json.
yaml_config_file = os.path.join('..','examples','demo6.yaml')
json_config_file_1 = os.path.join('..','examples','json','demo6a.json')
json_config_file_2 = os.path.join('..','examples','json','demo6b.json')
configs = galsim.config.ReadConfig(yaml_config_file)
config1 = galsim.config.ReadConfig(json_config_file_1)
config2 = galsim.config.ReadConfig(json_config_file_2)
assert len(configs) == 2
assert len(config1) == 1
assert len(config2) == 1
# A few adjustments are required before checking that they are equal.
# json files use '#' for comments
del config1[0]['#']
del config2[0]['#']
# remove the output dirs
del configs[0]['output']['dir']
del configs[1]['output']['dir']
del config1[0]['output']['dir']
del config2[0]['output']['dir']
    # yaml and json parse 1e5, 1e6 differently (string vs. float), so eval the yaml strings here.
configs[1]['gal']['flux'] = eval(configs[1]['gal']['flux'])
configs[1]['image']['sky_level'] = eval(configs[1]['image']['sky_level'])
# Now serialize with json to force the same ordering, etc.
s_yaml = json.dumps(configs[0], sort_keys=True)
s_json = json.dumps(config1[0], sort_keys=True)
assert s_yaml == s_json
s_yaml = json.dumps(configs[1], sort_keys=True)
s_json = json.dumps(config2[0], sort_keys=True)
assert s_yaml == s_json
@timer
def test_no_output():
"""Technically, it is permissible to not have an output field.
This is pretty contrived, but make sure it works as intended.
"""
config = {
'gal' : {
'type' : 'Gaussian',
'sigma' : 1.7,
'flux' : 100,
},
'root' : 'output/test_no_output' # The galsim executable sets this to the base name of
# the config file.
}
file_name = 'output/test_no_output.fits'
if os.path.exists(file_name):
os.remove(file_name)
galsim.config.Process(config)
assert os.path.exists(file_name)
im1 = galsim.fits.read(file_name)
im2 = galsim.Gaussian(sigma=1.7,flux=100).drawImage(scale=1)
np.testing.assert_equal(im1.array,im2.array)
@timer
def test_eval_full_word():
"""This test duplicates a bug that was found when using the galsim_extra FocalPlane type.
It's a bit subtle. The FocalPlane builder sets up some eval_variables with extra things
that can be used in Eval items like the center of the exposure, the min/max RA and Dec,
the distance of an object from the center of the exposure, etc.
Two of these are focal_r and focal_rmax. The former is calculated for any given object
and gives the radial distance from the center of the focal plane. The latter gives the
maximum possible radial distance of any possible object (based on the outermost chip
corners).
The bug that turned up was that focal_rmax was accessed when loading an input power_spectrum,
which would also trigger the evaluation of focal_r, since that string was also located in
the eval string. But this led to problems, since focal_r was based on world_pos, but that
    was intended to be used with obj_num rngs, which weren't set up yet at the time the input
stuff is processed.
So there are two fixes to this, which this test checks. First, the setup of the file-level
RNG also sets up the object-level RNG properly, so it doesn't matter if focal_r is accessed
at this point. And second, the eval code now matches to the full word, not just any portion
of a word, so shorter eval_variables (focal_r in this case) won't get evaluated gratuitously.
    In addition to testing that issue, we also include another feature where we originally ran into
    trouble. Namely, having the number of objects be random in each exposure, but having the random
number seed for most things repeat for all images in each exposure, which needs to know the
number of objects in the exposure. The salient aspects of this are duplicated here by
using MultiFits with the objects being identical for each image in the file.
"""
# Much of this is copied from the FocalPlane implementation or the focal_quick.yaml file
# in the galsim_extra repo.
config = {
'eval_variables': {
# focal_r is a useful calculation that galaxy/PSF properties might want to depend on.
# It is intended to be accessed as an object property.
'ffocal_r' : {
'type' : 'Eval',
'str' : "math.sqrt(pos.x**2 + pos.y**2)",
'ppos' : {
'type' : 'Eval',
'str' : "galsim.PositionD((uv/galsim.arcsec for uv in world_center.project(world_pos)))",
'cworld_pos' : "@image.world_pos"
}
},
# FocalPlane calculates the below values, including particularly focal_rmax, based on
# the WCS's and sets the value in the config dict for each exposure.
# They may be used by objects in conjunction with focal_r, but in this case it is also
# used by the input PSF power spectrum (below) to set the overall scale of the fft
# grid. This is where the bug related to full words in the Eval code came into play.
'ffocal_rmax' : 25.,
'afov_minra' : '-15 arcsec',
'afov_maxra' : '15 arcsec',
'afov_mindec' : '-15 arcsec',
'afov_maxdec' : '15 arcsec',
'fseeing' : {
'type' : 'RandomGaussian',
'mean' : 0.7,
'sigma' : 0.1,
'index_key' : 'image_num' # Seeing changes each exposure
}
},
'input' : {
'power_spectrum' : {
'e_power_function': '(k**2 + (1./180)**2)**(-11./6.)',
'b_power_function': '@input.power_spectrum.e_power_function',
'units': 'arcsec',
'grid_spacing': 10,
'ngrid': '$math.ceil(2*focal_rmax / @input.power_spectrum.grid_spacing)',
},
},
'image' : {
'type' : 'Scattered',
'xsize' : 100,
'ysize' : 100,
# This is similar to the tricky random number generation issue that we ran into in
# FocalPlane. That repeated for each exp_num, rather than file_num, but the issue
# is basically the same.
'random_seed' : [
# Used for noise and nobjects.
{ 'type' : 'Sequence', 'index_key' : 'obj_num', 'first' : 1234 },
# Used for objects. Repeats sequence for each image in file
{
'type' : 'Eval',
'index_key' : 'obj_num',
'str' : '314159 + start_obj_num + (obj_num - start_obj_num) % nobjects',
'inobjects' : { 'type' : 'Current', 'key' : 'image.nobjects' }
},
],
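            # (For example, if nobjects = 9 and start_obj_num = 0, then obj_num 0 and obj_num 9
            # both evaluate to seed 314159, so the object-level seeds repeat for each image in
            # the file.)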
# We also used to have problems with this being a random value, so keep that feature
# here as well.
'nobjects' : {
'type' : 'RandomPoisson',
'index_key' : 'file_num',
'mean' : 10 # Normally much more of course.
},
'noise' : { 'type' : 'Gaussian', 'sigma' : 10 },
# FocalPlane sets this for each exposure. We'll use the same thing for all files here.
'world_center' : galsim.CelestialCoord(0*galsim.degrees, 0*galsim.degrees),
# focal_r depends on world_pos, so let's copy that as is from the galsim_extra
# config file, focal_quick.yaml, where we used to have problems.
'world_pos': {
'rng_num' : 1,
'type': 'RADec',
'ra': {
'type': 'Radians',
'theta': { 'type': 'Random', 'min': "$fov_minra.rad", 'max': "$fov_maxra.rad" }
},
'dec': {
'type': 'Radians',
'theta': {
'type': 'RandomDistribution',
'function': "math.cos(x)",
'x_min': "$fov_mindec.rad",
'x_max': "$fov_maxdec.rad",
}
}
},
# We have to have a CelestialWCS to use CelestialCoords for world_pos.
# This one is about as simple as it gets.
'wcs': {
'type': 'Tan',
'dudx': 0.26, 'dudy': 0., 'dvdx': 0., 'dvdy': 0.26,
'origin' : galsim.PositionD(50,50),
'ra' : '0 deg', 'dec' : '0 deg',
}
},
'output' : {
# Not using the FocalPlane type, since that's a galsim_extra thing. But we can
# get the same complications in terms of the random number of objects by using
# MultiFits output, and have the random_seed repeat for each image in a file.
'type' : 'MultiFits',
'nimages' : 2,
'nfiles' : 2,
'file_name' : "$'output/test_eval_full_word_{0}.fits'.format(file_num)",
'truth' : {
'file_name' : "$'output/test_eval_full_word_{0}.dat'.format(file_num)",
'columns' : {
'num' : 'obj_num',
'exposure' : 'image_num',
'pos' : 'image_pos',
'ra' : 'image.world_pos.ra',
'dec' : 'image.world_pos.dec',
'flux' : 'gal.flux',
'size' : 'gal.sigma',
'psf_fwhm' : 'psf.fwhm',
}
}
},
'psf' : {
'type' : 'Moffat',
'beta' : 3.0,
# Size of PSF ranges from 0.7 to 0.9 over the focal plane
'fwhm' : '$seeing + 0.2 * (focal_r / focal_rmax)**2',
},
'gal' : {
'rng_num' : 1,
# Keep the galaxy simple, but with random components.
'type' : 'Gaussian',
'sigma' : { 'type' : 'Random', 'min': 0.5, 'max': 1.5 },
'flux' : { 'type' : 'Random', 'min': 5000, 'max': 25000 },
}
}
logger = logging.getLogger('test_eval_full_word')
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
galsim.config.Process(config, logger=logger, except_abort=True)
# First check the truth catalogs
data0 = np.genfromtxt('output/test_eval_full_word_0.dat', names=True, deletechars='')
data1 = np.genfromtxt('output/test_eval_full_word_1.dat', names=True, deletechars='')
assert len(data0) == 18 # 9 obj each for first two exposures
assert len(data1) == 24 # 12 obj each for next two exposures
data00 = data0[:9]
data01 = data0[9:]
data10 = data1[:12]
data11 = data1[12:]
# Check exposure = image_num
np.testing.assert_array_equal(data00['exposure'], 0)
np.testing.assert_array_equal(data01['exposure'], 1)
np.testing.assert_array_equal(data10['exposure'], 2)
np.testing.assert_array_equal(data11['exposure'], 3)
# Check obj_num
np.testing.assert_array_equal(data00['num'], range(0,9))
np.testing.assert_array_equal(data01['num'], range(9,18))
np.testing.assert_array_equal(data10['num'], range(18,30))
np.testing.assert_array_equal(data11['num'], range(30,42))
    # Check that galaxy properties are identical for the two exposures in each file, but differ across files
for key in ['pos.x', 'pos.y', 'ra.rad', 'dec.rad', 'flux', 'size']:
np.testing.assert_array_equal(data00[key], data01[key])
np.testing.assert_array_equal(data10[key], data11[key])
assert np.all(np.not_equal(data00[key], data10[key][:9]))
# PSFs should all be different, but only in the mean
assert np.all(np.not_equal(data00['psf_fwhm'], data01['psf_fwhm']))
assert np.all(np.not_equal(data10['psf_fwhm'], data11['psf_fwhm']))
assert np.all(np.not_equal(data00['psf_fwhm'], data10['psf_fwhm'][:9]))
np.testing.assert_array_almost_equal(data00['psf_fwhm'] - np.mean(data00['psf_fwhm']),
data01['psf_fwhm'] - np.mean(data01['psf_fwhm']))
np.testing.assert_array_almost_equal(data10['psf_fwhm'] - np.mean(data10['psf_fwhm']),
data11['psf_fwhm'] - np.mean(data11['psf_fwhm']))
    # Finally, the images should be different, but almost equal, since the difference should only
# be in the Gaussian noise.
im00, im01 = galsim.fits.readMulti('output/test_eval_full_word_0.fits')
assert np.all(np.not_equal(im00.array, im01.array))
assert abs(np.mean(im00.array - im01.array)) < 0.1
assert 13.5 < np.std(im00.array - im01.array) < 15 # should be ~10 * sqrt(2)
assert np.max(np.abs(im00.array)) > 200 # Just verify that many values are quite large
im10, im11 = galsim.fits.readMulti('output/test_eval_full_word_1.fits')
assert np.all(np.not_equal(im10.array, im11.array))
assert abs(np.mean(im10.array - im11.array)) < 0.1
assert 13.5 < np.std(im10.array - im11.array) < 15
assert np.max(np.abs(im10.array)) > 200
@timer
def test_timeout():
"""Test the timeout option
"""
config = {
'image' : {
'type' : 'Scattered',
'random_seed' : 1234,
'nobjects' : 5,
'pixel_scale' : 0.3,
'size' : 128,
'image_pos' : { 'type' : 'XY',
'x' : { 'type': 'Random', 'min': 10, 'max': 54 },
'y' : { 'type': 'Random', 'min': 10, 'max': 54 } },
},
'gal' : {
'type' : 'Sersic',
'flux' : 100,
# Note: Making n random means the image creation is moderately slow
# (since a new Hankel transform is done for each one in SersicInfo)
# But don't let max be too large so it's not very slow!
'n' : { 'type': 'Random', 'min': 1, 'max': 2 },
'half_light_radius' : { 'type': 'Random', 'min': 1, 'max': 2 },
},
'psf' : {
'type' : 'Moffat',
'fwhm' : { 'type': 'Random', 'min': 0.3, 'max': 0.6 },
'beta' : { 'type': 'Random', 'min': 1.5, 'max': 6 },
},
'output' : {
'type' : 'Fits',
'nfiles' : 6,
'file_name' : "$'output/test_timeout_%d.fits'%file_num",
},
}
logger = logging.getLogger('test_timeout')
logger.addHandler(logging.StreamHandler(sys.stdout))
#logger.setLevel(logging.DEBUG)
# Single proc:
config1 = galsim.config.CopyConfig(config)
galsim.config.Process(config1, logger=logger)
# nproc in output field.
config2 = galsim.config.CopyConfig(config)
config2['output']['nproc'] = 3
config2['output']['timeout'] = 30 # Still plenty large enough not to timeout.
config2['output']['file_name'] = "$'output/test_timeout_nproc1_%d.fits'%file_num"
galsim.config.Process(config2, logger=logger)
for n in range(6):
im1 = galsim.fits.read('output/test_timeout_%d.fits'%n)
im2 = galsim.fits.read('output/test_timeout_nproc1_%d.fits'%n)
assert im1 == im2
# Check that it behaves sensibly if it hits timeout limit.
# (PyPy doesn't seem to timeout, so skip this on PyPy.)
if platform.python_implementation() != 'PyPy':
config2 = galsim.config.CleanConfig(config2)
config2['output']['timeout'] = 0.001
with CaptureLog() as cl:
with assert_raises(galsim.GalSimError):
galsim.config.Process(config2, logger=cl.logger)
assert 'Multiprocessing timed out waiting for a task to finish.' in cl.output
# nproc in image field.
config2 = galsim.config.CopyConfig(config)
config2['image']['nproc'] = 3
config2['image']['timeout'] = 30
config2['output']['file_name'] = "$'output/test_timeout_nproc2_%d.fits'%file_num"
galsim.config.Process(config2, logger=logger)
for n in range(6):
im1 = galsim.fits.read('output/test_timeout_%d.fits'%n)
im2 = galsim.fits.read('output/test_timeout_nproc2_%d.fits'%n)
assert im1 == im2
# If you use BuildImages, it uses the image nproc and timeout specs, but parallelizes
# over images rather than stamps. So check that.
config2 = galsim.config.CleanConfig(config2)
images = galsim.config.BuildImages(6, config2, logger=logger)
for n, im in enumerate(images):
im1 = galsim.fits.read('output/test_timeout_%d.fits'%n)
assert im1 == im
if platform.python_implementation() != 'PyPy':
# Check that it behaves sensibly if it hits timeout limit.
# This time, it will continue on after each error, but report the error in the log.
config2 = galsim.config.CleanConfig(config2)
config2['image']['timeout'] = 0.001
with CaptureLog() as cl:
galsim.config.Process(config2, logger=cl.logger)
assert 'Multiprocessing timed out waiting for a task to finish.' in cl.output
assert 'File output/test_timeout_nproc2_1.fits not written! Continuing on...' in cl.output
assert 'No files were written. All were either skipped or had errors.' in cl.output
# If you want this to abort, use except_abort=True
config2 = galsim.config.CleanConfig(config2)
with CaptureLog() as cl:
with assert_raises(galsim.GalSimError):
galsim.config.Process(config2, logger=cl.logger, except_abort=True)
assert 'Multiprocessing timed out waiting for a task to finish.' in cl.output
def test_direct_extra_output():
# Test the ability to get extra output directly after calling BuildImage, but
# not the usual higher level functions (Process or BuildFile).
# Thanks to Sid Mau for finding this problem in version 2.4.2.
config = {
'gal': {
'type': 'Exponential',
'half_light_radius': 0.5,
'signal_to_noise': 100,
},
'psf': {
'type': 'Gaussian',
'fwhm': 0.7,
},
'image': {
'type': 'Tiled',
'nx_tiles': 10,
'ny_tiles': 10,
'stamp_size': 32,
'pixel_scale': 0.2,
'noise': {
'type': 'Gaussian',
'sigma': 0.02,
},
'random_seed': 1234,
},
'output': {
'dir': 'output',
'file_name': 'test_direct_extra.fits',
'weight': {
'hdu': 1
},
'badpix': {
'hdu': 2
},
'psf': {
'hdu': 3
},
},
}
# First, get the extras without running the whole file processing.
image = galsim.config.BuildImage(config)
weight = galsim.config.GetFinalExtraOutput('weight', config)[0]
badpix = galsim.config.GetFinalExtraOutput('badpix', config)[0]
psf = galsim.config.GetFinalExtraOutput('psf', config)[0]
# These should be the same as what you get from running BuildFile.
galsim.config.BuildFile(config)
fname = os.path.join('output', 'test_direct_extra.fits')
image1 = galsim.fits.read(fname, hdu=0)
weight1 = galsim.fits.read(fname, hdu=1)
badpix1 = galsim.fits.read(fname, hdu=2)
psf1 = galsim.fits.read(fname, hdu=3)
assert image == image1
assert weight == weight1
assert badpix == badpix1
assert psf == psf1
if __name__ == "__main__":
testfns = [v for k, v in vars().items() if k[:5] == 'test_' and callable(v)]
for testfn in testfns:
testfn()
|
70165302a7fa973f89b454165d93479d74e342d3
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Configuration/Generator/python/concurrentLumisDisable.py
|
647695db431096e4944d4d883c1c8c54e7018bbc
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 571
|
py
|
concurrentLumisDisable.py
|
# list of generator EDModules (C++ type) that do not support concurrentLuminosityBlocks
noConcurrentLumiGenerators = [
"AMPTGeneratorFilter",
"BeamHaloProducer",
"CosMuoGenProducer",
"ExhumeGeneratorFilter",
"Herwig7GeneratorFilter",
"HydjetGeneratorFilter",
"Hydjet2GeneratorFilter",
"PyquenGeneratorFilter",
"Pythia6GeneratorFilter",
"Pythia8EGun",
"Pythia8GeneratorFilter",
"Pythia8HadronizerFilter",
"Pythia8PtAndDxyGun",
"Pythia8PtGun",
"ReggeGribovPartonMCGeneratorFilter",
"SherpaGeneratorFilter",
]
|
90d600733ece5dc0ff2ec263eafdcbffa7c9d15b
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Alignment/OfflineValidation/python/TkAlAllInOneTool/trackSplittingValidationTemplates.py
|
e9c7e9a76cb5cc40cfdadebcc1c39d2d3c3d20ae
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 6,322
|
py
|
trackSplittingValidationTemplates.py
|
######################################################################
######################################################################
TrackSplittingTemplate="""
#adding this ~doubles the efficiency of selection
process.FittingSmootherRKP5.EstimateCut = -1
.oO[subdetselection]Oo.
# Use compression settings of TFile
# see https://root.cern.ch/root/html534/TFile.html#TFile:SetCompressionSettings
# settings = 100 * algorithm + level
# level is from 1 (small) to 9 (large compression)
# algo: 1 (ZLIB), 2 (LZMA)
# see more about compression & performance: https://root.cern.ch/root/html534/guides/users-guide/InputOutput.html#compression-and-performance
compressionSettings = 207
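# e.g. 207 = 100*2 + 7, i.e. LZMA (algorithm 2) at compression level 7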
process.cosmicValidation = cms.EDAnalyzer("CosmicSplitterValidation",
compressionSettings = cms.untracked.int32(compressionSettings),
ifSplitMuons = cms.bool(False),
ifTrackMCTruth = cms.bool(False),
checkIfGolden = cms.bool(False),
splitTracks = cms.InputTag("FinalTrackRefitter","","splitter"),
splitGlobalMuons = cms.InputTag("muons","","splitter"),
originalTracks = cms.InputTag("FirstTrackRefitter","","splitter"),
originalGlobalMuons = cms.InputTag("muons","","Rec")
)
"""
######################################################################
######################################################################
TrackSplittingSequence = "process.cosmicValidation"
######################################################################
######################################################################
trackSplitPlotExecution="""
#make track splitting plots
cp .oO[trackSplitPlotScriptPath]Oo. .
root -x -b -q TkAlTrackSplitPlot.C++
"""
######################################################################
######################################################################
trackSplitPlotTemplate="""
#include "Alignment/OfflineValidation/macros/trackSplitPlot.C"
/****************************************
This can be run directly in root, or you
can run ./TkAlMerge.sh in this directory
It can be run as is, or adjusted to fit
for misalignments or to only make
certain plots
****************************************/
/********************************
To make ALL plots (247 in total):
leave this file as is
********************************/
/**************************************************************************
to make all plots involving a single x or y variable, or both:
Uncomment the line marked (B), and fill in for xvar and yvar
Examples:
xvar = "dxy", yvar = "ptrel" - makes plots of dxy vs Delta_pT/pT
(4 total - profile and resolution,
of Delta_pT/pT and its pull
distribution)
xvar = "all", yvar = "pt" - makes all plots involving Delta_pT
(not Delta_pT/pT)
(30 plots total:
histogram and pull distribution, and
their mean and width as a function
of the 7 x variables)
xvar = "", yvar = "all" - makes all histograms of all y variables
(including Delta_pT/pT)
(16 plots total - 8 y variables,
regular and pull histograms)
**************************************************************************/
/**************************************************************************************
To make a custom selection of plots:
Uncomment the lines marked (C) and this section, and fill in matrix however you want */
/*
Bool_t plotmatrix[xsize][ysize];
void fillmatrix()
{
for (int x = 0; x < xsize; x++)
for (int y = 0; y < ysize; y++)
plotmatrix[x][y] = (.............................);
}
*/
/*
The variables are defined in Alignment/OfflineValidation/macros/trackSplitPlot.h
as follows:
TString xvariables[xsize] = {"", "pt", "eta", "phi", "dz", "dxy", "theta",
"qoverpt"};
TString yvariables[ysize] = {"pt", "pt", "eta", "phi", "dz", "dxy", "theta",
"qoverpt", ""};
Bool_t relativearray[ysize] = {true, false, false, false, false, false, false,
false, false};
Use matrix[x][y] = true to make that plot, and false not to make it.
**************************************************************************************/
/*************************************************************************************
To fit for a misalignment, which can be combined with any other option:
Uncomment the line marked (A) and this section, and choose your misalignment */
/*
TString misalignment = "choose one";
double *values = 0;
double *phases = 0;
//or:
// double values[number of files] = {...};
// double phases[number of files] = {...};
*/
/*
The options for misalignment are sagitta, elliptical, skew, telescope, or layerRot.
If the magnitude and phase of the misalignment are known (i.e. Monte Carlo data using
a geometry produced by the systematic misalignment tool), make values and phases into
arrays, with one entry for each file, to make a plot of the result of the fit vs. the
misalignment value.
phases must be filled in for sagitta, elliptical, and skew if values is;
for the others it has no effect
*************************************************************************************/
void TkAlTrackSplitPlot()
{
TkAlStyle::legendheader = ".oO[legendheader]Oo.";
TkAlStyle::legendoptions = ".oO[legendoptions]Oo.";
TkAlStyle::set(.oO[publicationstatus]Oo., .oO[era]Oo., ".oO[customtitle]Oo.", ".oO[customrighttitle]Oo.");
outliercut = .oO[outliercut]Oo.;
//fillmatrix(); //(C)
subdetector = ".oO[subdetector]Oo.";
makePlots(
.oO[PlottingInstantiation]Oo.
,
//misalignment,values,phases, //(A)
".oO[datadir]Oo./.oO[PlotsDirName]Oo."
//,"xvar","yvar" //(B)
//,plotmatrix //(C)
);
}
"""
|
c2b1023494383e7a9a3ea3782581e63ea7bbaa86
|
2bb263de5f1dc0dbf82d97fd56bc7945255f3ff5
|
/DeBERTa/utils/xtqdm.py
|
2908d0c1ce9683afe2ca1e1050b351fbd3a03f0a
|
[
"MIT"
] |
permissive
|
microsoft/DeBERTa
|
e3d1fa6aba9175c2ca53ce0c51206083180a023d
|
4d7fe0bd4fb3c7d4f4005a7cafabde9800372098
|
refs/heads/master
| 2023-09-04T21:05:30.905341
| 2023-03-25T10:22:59
| 2023-03-25T10:22:59
| 270,730,507
| 1,641
| 207
|
MIT
| 2023-09-12T22:40:57
| 2020-06-08T15:57:14
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
xtqdm.py
|
from tqdm import tqdm
import os
__all__=['xtqdm']
class dummy_tqdm():
def __init__(self, iterable=None, *wargs, **kwargs):
self.iterable = iterable
def __iter__(self):
for d in self.iterable:
yield d
def update(self, *wargs, **kwargs):
pass
def close(self):
pass
def xtqdm(iterable=None, *wargs, **kwargs):
disable = False
if 'disable' in kwargs:
disable = kwargs['disable']
if 'NO_TQDM' in os.environ:
disable = True if os.getenv('NO_TQDM', '0')!='0' else False
if disable:
return dummy_tqdm(iterable, *wargs, **kwargs)
else:
return tqdm(iterable, *wargs, **kwargs)
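# Example usage (illustrative, not part of the original module): setting the NO_TQDM
# environment variable to anything other than '0' silences the progress bar:
#
#   os.environ['NO_TQDM'] = '1'
#   for batch in xtqdm(range(100), desc='train'):
#       ...  # iterates via dummy_tqdm, so no progress bar is printed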
|
b0bde1eed0fccd6e5bc658b6306b7cdb5a798a69
|
032cde6476486d7200b235f16a1c4f32c99d19b7
|
/tests/test_serializer_field.py
|
460f1df3ca92b0d401619472b214eed5e35d4a63
|
[
"BSD-2-Clause"
] |
permissive
|
mfogel/django-timezone-field
|
ab9be0711409445d85959be80856a5351d08194b
|
06c3c099595d516d17ad4f642e2e221876443352
|
refs/heads/main
| 2023-08-22T05:16:55.379046
| 2023-08-21T00:19:31
| 2023-08-21T00:19:31
| 4,010,596
| 305
| 89
|
BSD-2-Clause
| 2023-09-07T06:51:05
| 2012-04-12T23:48:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
test_serializer_field.py
|
import pytest
from rest_framework import serializers
from timezone_field.rest_framework import TimeZoneSerializerField
@pytest.fixture
def TimeZoneSerializer(use_pytz):
class _TimeZoneSerializer(serializers.Serializer):
# pylint: disable=abstract-method
tz = TimeZoneSerializerField(use_pytz=use_pytz)
yield _TimeZoneSerializer
def test_invalid_str(TimeZoneSerializer, invalid_tz):
serializer = TimeZoneSerializer(data={"tz": invalid_tz})
assert not serializer.is_valid()
# https://github.com/mfogel/django-timezone-field/issues/86
def test_empty_str(TimeZoneSerializer):
serializer = TimeZoneSerializer(data={"tz": ""})
assert not serializer.is_valid()
def test_valid(TimeZoneSerializer, pst, pst_tz):
serializer = TimeZoneSerializer(data={"tz": pst})
assert serializer.is_valid()
assert serializer.validated_data["tz"] == pst_tz
def test_valid_representation(TimeZoneSerializer, pst):
serializer = TimeZoneSerializer(data={"tz": pst})
assert serializer.is_valid()
assert serializer.data["tz"] == pst
def test_valid_with_timezone_object(TimeZoneSerializer, pst, pst_tz):
serializer = TimeZoneSerializer(data={"tz": pst_tz})
assert serializer.is_valid()
assert serializer.data["tz"] == pst
assert serializer.validated_data["tz"] == pst_tz
|
25aff594107a1197bdfcd9842ebc9c06bd0648df
|
6303764a0fb95d1ae9d97e8d8d333eb45adba367
|
/11_decision_trees_random_forests/00_custom_bundle/stooq_jp_stocks.py
|
dc0758138dbb38d05cdaa0231b04d3148445cae7
|
[
"MIT"
] |
permissive
|
PacktPublishing/Machine-Learning-for-Algorithmic-Trading-Second-Edition_Original
|
3e533b352f105ed0cc530364d5f314b20484ae45
|
2225dbfd39079216192460f4350848aa4205c226
|
refs/heads/master
| 2023-01-22T12:48:42.137805
| 2023-01-18T09:11:25
| 2023-01-18T09:11:25
| 221,883,227
| 708
| 289
|
MIT
| 2022-07-26T02:53:31
| 2019-11-15T08:51:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
stooq_jp_stocks.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Stefan Jansen'
from pathlib import Path
import os
import numpy as np
import pandas as pd
pd.set_option('display.expand_frame_repr', False)
np.random.seed(42)
zipline_root = None
try:
zipline_root = os.environ['ZIPLINE_ROOT']
except KeyError:
print('Please ensure a ZIPLINE_ROOT environment variable is defined and accessible '
          '(or alter the script and manually set the path)')
exit()
custom_data_path = Path(zipline_root, 'custom_data')
# custom_data_path = Path('~/.zipline/custom_data').expanduser()
def load_equities():
return pd.read_hdf(custom_data_path / 'stooq.h5', 'jp/equities')
def ticker_generator():
"""
    Lazily yield (sid, symbol, asset_name) tuples from the equities table
"""
return (v for v in load_equities().values)
def data_generator():
for sid, symbol, asset_name in ticker_generator():
df = pd.read_hdf(custom_data_path / 'stooq.h5', 'jp/{}'.format(sid))
start_date = df.index[0]
end_date = df.index[-1]
first_traded = start_date.date()
auto_close_date = end_date + pd.Timedelta(days=1)
exchange = 'XTKS'
yield (sid, df), symbol, asset_name, start_date, end_date, first_traded, auto_close_date, exchange
def metadata_frame():
dtype = [
('symbol', 'object'),
('asset_name', 'object'),
('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('first_traded', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('exchange', 'object'), ]
return pd.DataFrame(np.empty(len(load_equities()), dtype=dtype))
def stooq_jp_to_bundle(interval='1d'):
def ingest(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir
):
metadata = metadata_frame()
def daily_data_generator():
return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in data_generator())
daily_bar_writer.write(daily_data_generator(), show_progress=True)
metadata.dropna(inplace=True)
asset_db_writer.write(equities=metadata)
# empty DataFrame
adjustment_writer.write(splits=pd.read_hdf(custom_data_path / 'stooq.h5', 'jp/splits'))
return ingest
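# Typical registration (assumed usage, not part of this file), e.g. in a zipline extension.py:
#
# from zipline.data.bundles import register
# register('stooq_jp', stooq_jp_to_bundle(), calendar_name='XTKS')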
|
9ab0261f4766f40ab646f768c0b6d073fe690304
|
3dc647cd07a7361ed401e40d2b7cce8c826c8f6c
|
/Lib/test/test_unpack.py
|
f5ca1d455b5c6f2db608d01848a1a4050fc14eb0
|
[
"Python-2.0",
"CC-BY-4.0",
"MIT"
] |
permissive
|
RustPython/RustPython
|
5ddce4a9848b9de8c041ffd2634f83c0105d3f39
|
b864e5da1f18897fc884180b7093df5aa170024f
|
refs/heads/main
| 2023-09-04T12:38:29.458699
| 2023-09-03T12:33:42
| 2023-09-03T12:33:42
| 135,201,145
| 15,815
| 1,302
|
MIT
| 2023-09-14T08:11:45
| 2018-05-28T19:27:01
|
Rust
|
UTF-8
|
Python
| false
| false
| 3,595
|
py
|
test_unpack.py
|
import doctest
import unittest
doctests = """
Unpack tuple
>>> t = (1, 2, 3)
>>> a, b, c = t
>>> a == 1 and b == 2 and c == 3
True
Unpack list
>>> l = [4, 5, 6]
>>> a, b, c = l
>>> a == 4 and b == 5 and c == 6
True
Unpack implied tuple
>>> a, b, c = 7, 8, 9
>>> a == 7 and b == 8 and c == 9
True
Unpack string... fun!
>>> a, b, c = 'one'
>>> a == 'o' and b == 'n' and c == 'e'
True
Unpack generic sequence
>>> class Seq:
... def __getitem__(self, i):
... if i >= 0 and i < 3: return i
... raise IndexError
...
>>> a, b, c = Seq()
>>> a == 0 and b == 1 and c == 2
True
Single element unpacking, with extra syntax
>>> st = (99,)
>>> sl = [100]
>>> a, = st
>>> a
99
>>> b, = sl
>>> b
100
Now for some failures
Unpacking non-sequence
>>> a, b, c = 7
Traceback (most recent call last):
...
TypeError: cannot unpack non-iterable int object
Unpacking tuple of wrong size
>>> a, b = t
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking list of wrong size
>>> a, b = l
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking sequence too short
>>> a, b, c, d = Seq()
Traceback (most recent call last):
...
ValueError: not enough values to unpack (expected 4, got 3)
Unpacking sequence too long
>>> a, b = Seq()
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking a sequence where the test for too long raises a different kind of
error
>>> class BozoError(Exception):
... pass
...
>>> class BadSeq:
... def __getitem__(self, i):
... if i >= 0 and i < 3:
... return i
... elif i == 3:
... raise BozoError
... else:
... raise IndexError
...
Trigger code while not expecting an IndexError (unpack sequence too long, wrong
error)
>>> a, b, c, d, e = BadSeq()
Traceback (most recent call last):
...
test.test_unpack.BozoError
Trigger code while expecting an IndexError (unpack sequence too short, wrong
error)
>>> a, b, c = BadSeq()
Traceback (most recent call last):
...
test.test_unpack.BozoError
Allow unpacking empty iterables
>>> () = []
>>> [] = ()
>>> [] = []
>>> () = ()
Unpacking non-iterables should raise TypeError
>>> () = 42
Traceback (most recent call last):
...
TypeError: cannot unpack non-iterable int object
Unpacking to an empty iterable should raise ValueError
>>> () = [42]
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 0)
"""
__test__ = {'doctests' : doctests}
def load_tests(loader, tests, pattern):
tests.addTest(doctest.DocTestSuite())
return tests
class TestCornerCases(unittest.TestCase):
def test_extended_oparg_not_ignored(self):
# https://github.com/python/cpython/issues/91625
target = "(" + "y,"*400 + ")"
code = f"""def unpack_400(x):
{target} = x
return y
"""
ns = {}
exec(code, ns)
unpack_400 = ns["unpack_400"]
        # Warm up the function for quickening (PEP 659)
for _ in range(30):
y = unpack_400(range(400))
self.assertEqual(y, 399)
if __name__ == "__main__":
unittest.main()
|
16be2a6b284169c18c5382c5457ff837d995f694
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Zhone/MXK/get_arp.py
|
0b70638420887bc324c56bb5181a3a4af98bbaed
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 942
|
py
|
get_arp.py
|
# ---------------------------------------------------------------------
# Zhone.MXK.get_arp
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetarp import IGetARP
class Script(BaseScript):
name = "Zhone.MXK.get_arp"
interface = IGetARP
rx_line = re.compile(
r"^(?P<ip>\d\S+)\s+(?P<mac>\S+)\s+\d+\s+\d+\s+\d+\s+(?P<interface>\S+)\s*\n", re.MULTILINE
)
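    # Illustrative example (not actual device output): a line such as
    #   "10.0.0.1   00:11:22:33:44:55   1   2   3   eth1"
    # followed by a newline would be captured with ip, mac, and interface groups.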
def execute(self, vrf=None):
r = []
for match in self.rx_line.finditer(self.cli("ip arpshow")):
if match.group("mac") == "<incomplete>" or match.group("interface") == "coreEnd":
continue
r += [match.groupdict()]
return r
|
7e4d1f3ce665cbbb6d2eba9f1c5f6029dcf7d507
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/lenta.py
|
10aac984e4e3be595927a9ba7ef236a506843411
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
lenta.py
|
from .common import InfoExtractor
class LentaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?lenta\.ru/[^/]+/\d+/\d+/\d+/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://lenta.ru/news/2018/03/22/savshenko_go/',
'info_dict': {
'id': '964400',
'ext': 'mp4',
'title': 'Надежду Савченко задержали',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 61,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
# EaglePlatform iframe embed
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'vid\s*:\s*["\']?(\d+)', webpage, 'eagleplatform id',
default=None)
if video_id:
return self.url_result(
'eagleplatform:lentaru.media.eagleplatform.com:%s' % video_id,
ie='EaglePlatform', video_id=video_id)
return self.url_result(url, ie='Generic')
|
67222dcf86f17b21e840b885d99c37995d717efe
|
9c2e531ec004adef7d8d07fa0b263b9f2f83cdc6
|
/tests/test_common.py
|
3bc8f4afd9f82a1f04e15582375413784042651a
|
[
"MIT"
] |
permissive
|
davemlz/eemont
|
ac0a19938cd5a9457771ac65d345834ce86faf6b
|
7bddd88aa9ba040eff78009f2cafc55351911023
|
refs/heads/master
| 2023-08-17T03:36:29.013230
| 2023-08-17T00:22:47
| 2023-08-17T00:22:47
| 322,478,954
| 356
| 65
|
MIT
| 2022-04-05T08:11:22
| 2020-12-18T03:33:12
|
Python
|
UTF-8
|
Python
| false
| false
| 836
|
py
|
test_common.py
|
import unittest
import box
import ee
import eemont
ee.Initialize()
class Test(unittest.TestCase):
"""Tests for `eemont` package."""
def test_indices(self):
"""Test the indices function"""
test = eemont.indices()
self.assertIsInstance(test, box.box.Box)
def test_indices_online(self):
"""Test the indices function"""
test = eemont.indices(online=True)
self.assertIsInstance(test, box.box.Box)
def test_listIndices(self):
"""Test the listIndices function"""
test = eemont.listIndices()
self.assertIsInstance(test, list)
def test_listIndices_online(self):
"""Test the listIndices function"""
test = eemont.listIndices(online=True)
self.assertIsInstance(test, list)
if __name__ == "__main__":
unittest.main()
|
6c56db41e2f536e23051486a181c6db19fb991ac
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/adaptation/__init__.py
|
92168edf54e34fcc91501b4cac79c6b2288c4fce
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 11,905
|
py
|
__init__.py
|
"""
References
----------
- :cite:`CIETC1-321994b` : CIE TC 1-32. (1994). CIE 109-1994 A Method of
Predicting Corresponding Colours under Different Chromatic and Illuminance
Adaptations. Commission Internationale de l'Eclairage.
ISBN:978-3-900734-51-0
- :cite:`Fairchild1991a` : Fairchild, M. D. (1991). Formulation and testing
of an incomplete-chromatic-adaptation model. Color Research & Application,
16(4), 243-250. doi:10.1002/col.5080160406
- :cite:`Fairchild2013s` : Fairchild, M. D. (2013). FAIRCHILD'S 1990 MODEL.
In Color Appearance Models (3rd ed., pp. 4418-4495). Wiley. ISBN:B00DAYO8E2
- :cite:`Fairchild2013t` : Fairchild, M. D. (2013). Chromatic Adaptation
Models. In Color Appearance Models (3rd ed., pp. 4179-4252). Wiley.
ISBN:B00DAYO8E2
- :cite:`Li2002a` : Li, C., Luo, M. R., Rigg, B., & Hunt, R. W. G. (2002).
CMC 2000 chromatic adaptation transform: CMCCAT2000. Color Research &
Application, 27(1), 49-58. doi:10.1002/col.10005
- :cite:`Westland2012k` : Westland, S., Ripamonti, C., & Cheung, V. (2012).
CMCCAT2000. In Computational Colour Science Using MATLAB (2nd ed., pp.
83-86). ISBN:978-0-470-66569-5
- :cite:`Zhai2018` : Zhai, Q., & Luo, M. R. (2018). Study of chromatic
adaptation via neutral white matches on different viewing media. Optics
Express, 26(6), 7724. doi:10.1364/OE.26.007724
"""
from __future__ import annotations
from colour.hints import Any, ArrayLike, Literal, NDArrayFloat
from colour.utilities import (
CanonicalMapping,
filter_kwargs,
get_domain_range_scale,
as_float_array,
)
from .datasets import CHROMATIC_ADAPTATION_TRANSFORMS
from .datasets import (
CAT_BIANCO2010,
CAT_BRADFORD,
CAT_CAT02,
CAT_CAT02_BRILL2008,
CAT_CAT16,
CAT_CMCCAT2000,
CAT_CMCCAT97,
CAT_FAIRCHILD,
CAT_PC_BIANCO2010,
CAT_SHARP,
CAT_VON_KRIES,
CAT_XYZ_SCALING,
)
from .vonkries import (
matrix_chromatic_adaptation_VonKries,
chromatic_adaptation_VonKries,
)
from .fairchild1990 import chromatic_adaptation_Fairchild1990
from .cmccat2000 import (
InductionFactors_CMCCAT2000,
VIEWING_CONDITIONS_CMCCAT2000,
chromatic_adaptation_forward_CMCCAT2000,
chromatic_adaptation_inverse_CMCCAT2000,
chromatic_adaptation_CMCCAT2000,
)
from .cie1994 import chromatic_adaptation_CIE1994
from .zhai2018 import chromatic_adaptation_Zhai2018
from colour.utilities import validate_method
__all__ = ["CHROMATIC_ADAPTATION_TRANSFORMS"]
__all__ += [
"CAT_BIANCO2010",
"CAT_BRADFORD",
"CAT_CAT02",
"CAT_CAT02_BRILL2008",
"CAT_CAT16",
"CAT_CMCCAT2000",
"CAT_CMCCAT97",
"CAT_FAIRCHILD",
"CAT_PC_BIANCO2010",
"CAT_SHARP",
"CAT_VON_KRIES",
"CAT_XYZ_SCALING",
]
__all__ += [
"matrix_chromatic_adaptation_VonKries",
"chromatic_adaptation_VonKries",
]
__all__ += [
"chromatic_adaptation_Fairchild1990",
]
__all__ += [
"InductionFactors_CMCCAT2000",
"VIEWING_CONDITIONS_CMCCAT2000",
"chromatic_adaptation_forward_CMCCAT2000",
"chromatic_adaptation_inverse_CMCCAT2000",
"chromatic_adaptation_CMCCAT2000",
]
__all__ += [
"chromatic_adaptation_CIE1994",
]
__all__ += [
"chromatic_adaptation_Zhai2018",
]
CHROMATIC_ADAPTATION_METHODS: CanonicalMapping = CanonicalMapping(
{
"CIE 1994": chromatic_adaptation_CIE1994,
"CMCCAT2000": chromatic_adaptation_CMCCAT2000,
"Fairchild 1990": chromatic_adaptation_Fairchild1990,
"Von Kries": chromatic_adaptation_VonKries,
"Zhai 2018": chromatic_adaptation_Zhai2018,
}
)
CHROMATIC_ADAPTATION_METHODS.__doc__ = """
Supported chromatic adaptation methods.
References
----------
:cite:`CIETC1-321994b`, :cite:`Fairchild1991a`, :cite:`Fairchild2013s`,
:cite:`Fairchild2013t`, :cite:`Li2002a`, :cite:`Westland2012k`,
:cite:`Zhai2018`
"""
def chromatic_adaptation(
XYZ: ArrayLike,
XYZ_w: ArrayLike,
XYZ_wr: ArrayLike,
method: Literal[
"CIE 1994", "CMCCAT2000", "Fairchild 1990", "Zhai 2018", "Von Kries"
]
| str = "Von Kries",
**kwargs: Any,
) -> NDArrayFloat:
"""
Adapt given stimulus from test viewing conditions to reference viewing
conditions.
Parameters
----------
XYZ
*CIE XYZ* tristimulus values of stimulus to adapt.
XYZ_w
Test viewing condition *CIE XYZ* tristimulus values of the whitepoint.
XYZ_wr
Reference viewing condition *CIE XYZ* tristimulus values of the
whitepoint.
method
Computation method.
Other Parameters
----------------
E_o1
{:func:`colour.adaptation.chromatic_adaptation_CIE1994`},
Test illuminance :math:`E_{o1}` in :math:`cd/m^2`.
E_o2
{:func:`colour.adaptation.chromatic_adaptation_CIE1994`},
Reference illuminance :math:`E_{o2}` in :math:`cd/m^2`.
n
{:func:`colour.adaptation.chromatic_adaptation_CIE1994`},
Noise component in fundamental primary system.
Y_o
{:func:`colour.adaptation.chromatic_adaptation_CIE1994`},
Luminance factor :math:`Y_o` of achromatic background normalised to
domain [0.18, 1] in **'Reference'** domain-range scale.
direction
{:func:`colour.adaptation.chromatic_adaptation_CMCCAT2000`},
Chromatic adaptation direction.
L_A1
{:func:`colour.adaptation.chromatic_adaptation_CMCCAT2000`},
Luminance of test adapting field :math:`L_{A1}` in :math:`cd/m^2`.
L_A2
{:func:`colour.adaptation.chromatic_adaptation_CMCCAT2000`},
Luminance of reference adapting field :math:`L_{A2}` in :math:`cd/m^2`.
surround
{:func:`colour.adaptation.chromatic_adaptation_CMCCAT2000`},
Surround viewing conditions induction factors.
discount_illuminant
{:func:`colour.adaptation.chromatic_adaptation_Fairchild1990`},
Truth value indicating if the illuminant should be discounted.
Y_n
{:func:`colour.adaptation.chromatic_adaptation_Fairchild1990`},
Luminance :math:`Y_n` of test adapting stimulus in :math:`cd/m^2`.
D_b
{:func:`colour.adaptation.chromatic_adaptation_Zhai2018`},
Degree of adaptation :math:`D_\\beta` of input illuminant
:math:`\\beta`.
D_d
{:func:`colour.adaptation.chromatic_adaptation_Zhai2018`},
Degree of adaptation :math:`D_\\Delta` of output illuminant
:math:`\\Delta`.
transform
{:func:`colour.adaptation.chromatic_adaptation_VonKries`,
:func:`colour.adaptation.chromatic_adaptation_Zhai2018`},
Chromatic adaptation transform.
XYZ_wo
{:func:`colour.adaptation.chromatic_adaptation_Zhai2018`},
Baseline illuminant (:math:`BI`) :math:`o`.
Returns
-------
:class:`numpy.ndarray`
*CIE XYZ_c* tristimulus values of the stimulus corresponding colour.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_w`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_wr`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
| ``XYZ_wo`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
| ``Y_o`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``XYZ_c`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`CIETC1-321994b`, :cite:`Fairchild1991a`, :cite:`Fairchild2013s`,
:cite:`Fairchild2013t`, :cite:`Li2002a`, :cite:`Westland2012k`
Examples
--------
*Von Kries* chromatic adaptation:
>>> import numpy as np
>>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
>>> XYZ_w = np.array([0.95045593, 1.00000000, 1.08905775])
>>> XYZ_wr = np.array([0.96429568, 1.00000000, 0.82510460])
>>> chromatic_adaptation(XYZ, XYZ_w, XYZ_wr)
... # doctest: +ELLIPSIS
array([ 0.2163881..., 0.1257 , 0.0384749...])
*CIE 1994* chromatic adaptation, requires extra *kwargs*:
>>> XYZ = np.array([0.2800, 0.2126, 0.0527])
>>> XYZ_w = np.array([1.09867452, 1.00000000, 0.35591556])
>>> XYZ_wr = np.array([0.95045593, 1.00000000, 1.08905775])
>>> Y_o = 0.20
>>> E_o = 1000
>>> chromatic_adaptation(
... XYZ, XYZ_w, XYZ_wr, method="CIE 1994", Y_o=Y_o, E_o1=E_o, E_o2=E_o
... )
... # doctest: +ELLIPSIS
array([ 0.2403379..., 0.2115621..., 0.1764301...])
*CMCCAT2000* chromatic adaptation, requires extra *kwargs*:
>>> XYZ = np.array([0.2248, 0.2274, 0.0854])
>>> XYZ_w = np.array([1.1115, 1.0000, 0.3520])
>>> XYZ_wr = np.array([0.9481, 1.0000, 1.0730])
>>> L_A = 200
>>> chromatic_adaptation(
... XYZ, XYZ_w, XYZ_wr, method="CMCCAT2000", L_A1=L_A, L_A2=L_A
... )
... # doctest: +ELLIPSIS
array([ 0.1952698..., 0.2306834..., 0.2497175...])
*Fairchild (1990)* chromatic adaptation, requires extra *kwargs*:
>>> XYZ = np.array([0.1953, 0.2307, 0.2497])
>>> Y_n = 200
>>> chromatic_adaptation(
... XYZ, XYZ_w, XYZ_wr, method="Fairchild 1990", Y_n=Y_n
... )
... # doctest: +ELLIPSIS
array([ 0.2332526..., 0.2332455..., 0.7611593...])
*Zhai and Luo (2018)* chromatic adaptation:
>>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
>>> XYZ_w = np.array([0.95045593, 1.00000000, 1.08905775])
>>> XYZ_wr = np.array([0.96429568, 1.00000000, 0.82510460])
>>> chromatic_adaptation(XYZ, XYZ_w, XYZ_wr, method="Zhai 2018")
... # doctest: +ELLIPSIS
array([ 0.2163881..., 0.1257 , 0.0384749...])
>>> chromatic_adaptation(
... XYZ,
... XYZ_w,
... XYZ_wr,
... method="Zhai 2018",
... D_b=0.9,
... XYZ_wo=np.array([100, 100, 100]),
... )
... # doctest: +ELLIPSIS
array([ 0.2152436..., 0.1253522..., 0.0388406...])
"""
method = validate_method(method, tuple(CHROMATIC_ADAPTATION_METHODS))
function = CHROMATIC_ADAPTATION_METHODS[method]
domain_range_reference = get_domain_range_scale() == "reference"
domain_100 = (
chromatic_adaptation_CIE1994,
chromatic_adaptation_CMCCAT2000,
chromatic_adaptation_Fairchild1990,
chromatic_adaptation_Zhai2018,
)
if function in domain_100 and domain_range_reference:
XYZ = as_float_array(XYZ) * 100
XYZ_w = as_float_array(XYZ_w) * 100
XYZ_wr = as_float_array(XYZ_wr) * 100
if "Y_o" in kwargs:
kwargs["Y_o"] = kwargs["Y_o"] * 100
if "XYZ_wo" in kwargs:
kwargs["XYZ_wo"] = kwargs["XYZ_wo"] * 100
kwargs.update({"XYZ_w": XYZ_w, "XYZ_wr": XYZ_wr})
if function is chromatic_adaptation_CIE1994:
from colour import XYZ_to_xy
kwargs.update({"xy_o1": XYZ_to_xy(XYZ_w), "xy_o2": XYZ_to_xy(XYZ_wr)})
elif function is chromatic_adaptation_Fairchild1990:
kwargs.update({"XYZ_n": XYZ_w, "XYZ_r": XYZ_wr})
elif function is chromatic_adaptation_Zhai2018:
kwargs.update({"XYZ_wb": XYZ_w, "XYZ_wd": XYZ_wr})
XYZ_c = function(XYZ, **filter_kwargs(function, **kwargs))
if function in domain_100 and domain_range_reference:
XYZ_c /= 100
return XYZ_c
__all__ += [
"CHROMATIC_ADAPTATION_METHODS",
"chromatic_adaptation",
]
|
a2f6efdaf795581402fec1e5995f9c16cbe0387c
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/mako/mako/mako/ext/preprocessors.py
|
80403ecdaebedae0068af3d4f5ebd53249a2eb4e
|
[
"MIT",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
preprocessors.py
|
# ext/preprocessors.py
# Copyright 2006-2022 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""preprocessing functions, used with the 'preprocessor'
argument on Template, TemplateLookup"""
import re
def convert_comments(text):
"""preprocess old style comments.
example:
from mako.ext.preprocessors import convert_comments
t = Template(..., preprocessor=convert_comments)"""
return re.sub(r"(?<=\n)\s*#[^#]", "##", text)
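# For example (illustrative): convert_comments("x = 1\n# old comment\n") returns
# "x = 1\n##old comment\n" (the matched "# " prefix is replaced by "##").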
|
5a1ffc1e0344181cc9b68033d2ba17dee6c8401b
|
0349e502733a4c25f020fbcad4715f598d686799
|
/rl_coach/tests/test_golden.py
|
d7b97058851cc36f3cc101dce9b98e68071d4920
|
[
"Apache-2.0"
] |
permissive
|
IntelLabs/coach
|
679592e9887f5788229fef9d77a1a7975e959bc4
|
2c60cb5acd8cd3c9c381a5066c208e69fc273c7b
|
refs/heads/master
| 2023-09-05T17:56:19.435416
| 2022-12-11T17:54:06
| 2022-12-11T17:54:06
| 105,468,219
| 497
| 102
|
Apache-2.0
| 2021-12-27T09:52:30
| 2017-10-01T19:27:43
|
Python
|
UTF-8
|
Python
| false
| false
| 10,964
|
py
|
test_golden.py
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import glob
import os
import shutil
import signal
import subprocess
import sys
from importlib import import_module
from os import path
sys.path.append('.')
import numpy as np
import pandas as pd
import time
import pytest
# -*- coding: utf-8 -*-
from rl_coach.logger import screen
def read_csv_paths(test_path, filename_pattern, read_csv_tries=200):
csv_paths = []
tries_counter = 0
while not csv_paths:
csv_paths = glob.glob(path.join(test_path, '*', filename_pattern))
if tries_counter > read_csv_tries:
break
tries_counter += 1
time.sleep(1)
return csv_paths
def print_progress(averaged_rewards, last_num_episodes, preset_validation_params, start_time, time_limit):
percentage = int((100 * last_num_episodes) / preset_validation_params.max_episodes_to_achieve_reward)
sys.stdout.write("\rReward: ({}/{})".format(round(averaged_rewards[-1], 1),
preset_validation_params.min_reward_threshold))
sys.stdout.write(' Time (sec): ({}/{})'.format(round(time.time() - start_time, 2), time_limit))
sys.stdout.write(' Episode: ({}/{})'.format(last_num_episodes,
preset_validation_params.max_episodes_to_achieve_reward))
sys.stdout.write(
' {}%|{}{}| '.format(percentage, '#' * int(percentage / 10), ' ' * (10 - int(percentage / 10))))
sys.stdout.flush()
def import_preset(preset_name):
return import_module('rl_coach.presets.{}'.format(preset_name))
def validation_params(preset_name):
return import_preset(preset_name).graph_manager.preset_validation_params
def all_presets():
return [
f[:-3] for f in os.listdir(os.path.join('rl_coach', 'presets'))
if f[-3:] == '.py' and not f == '__init__.py'
]
def importable(preset_name):
try:
import_preset(preset_name)
return True
except BaseException:
return False
def has_test_parameters(preset_name):
return bool(validation_params(preset_name).test)
def collect_presets():
for preset_name in all_presets():
# if it isn't importable, still include it so we can fail the test
if not importable(preset_name):
yield preset_name
# otherwise, make sure it has test parameters before including it
elif has_test_parameters(preset_name):
yield preset_name
@pytest.fixture(params=list(collect_presets()))
def preset_name(request):
return request.param
@pytest.mark.golden_test
def test_preset_reward(preset_name, no_progress_bar=True, time_limit=60 * 60, verbose=True):
preset_validation_params = validation_params(preset_name)
win_size = 10
test_name = '__test_reward_{}'.format(preset_name)
test_path = os.path.join('./experiments', test_name)
if path.exists(test_path):
shutil.rmtree(test_path)
# run the experiment in a separate thread
screen.log_title("Running test {}".format(preset_name))
log_file_name = 'test_log_{preset_name}.txt'.format(preset_name=preset_name)
cmd = [
'python3',
'rl_coach/coach.py',
'-p', '{preset_name}'.format(preset_name=preset_name),
'-e', '{test_name}'.format(test_name=test_name),
'-n', '{num_workers}'.format(num_workers=preset_validation_params.num_workers),
'--seed', '0',
'-c'
]
if preset_validation_params.reward_test_level:
cmd += ['-lvl', '{level}'.format(level=preset_validation_params.reward_test_level)]
stdout = open(log_file_name, 'w')
p = subprocess.Popen(cmd, stdout=stdout, stderr=stdout)
start_time = time.time()
reward_str = 'Evaluation Reward'
if preset_validation_params.num_workers > 1:
filename_pattern = 'worker_0*.csv'
else:
filename_pattern = '*.csv'
test_passed = False
# get the csv with the results
csv_paths = read_csv_paths(test_path, filename_pattern, read_csv_tries=preset_validation_params.read_csv_tries)
if csv_paths:
csv_path = csv_paths[0]
# verify results
csv = None
time.sleep(1)
averaged_rewards = [0]
last_num_episodes = 0
if not no_progress_bar:
print_progress(averaged_rewards, last_num_episodes, preset_validation_params, start_time, time_limit)
        while csv is None or (
                csv[csv.columns[0]].values[-1] < preset_validation_params.max_episodes_to_achieve_reward
                and time.time() - start_time < time_limit):
try:
csv = pd.read_csv(csv_path)
            except Exception:
# sometimes the csv is being written at the same time we are
# trying to read it. no problem -> try again
continue
if reward_str not in csv.keys():
continue
rewards = csv[reward_str].values
rewards = rewards[~np.isnan(rewards)]
if len(rewards) >= 1:
averaged_rewards = np.convolve(rewards, np.ones(min(len(rewards), win_size)) / win_size, mode='valid')
else:
time.sleep(1)
continue
if not no_progress_bar:
print_progress(averaged_rewards, last_num_episodes, preset_validation_params, start_time, time_limit)
if csv[csv.columns[0]].shape[0] - last_num_episodes <= 0:
continue
last_num_episodes = csv[csv.columns[0]].values[-1]
# check if reward is enough
if np.any(averaged_rewards >= preset_validation_params.min_reward_threshold):
test_passed = True
break
time.sleep(1)
# kill test and print result
# os.killpg(os.getpgid(p.pid), signal.SIGKILL)
p.kill()
screen.log('')
if test_passed:
screen.success("Passed successfully")
else:
if time.time() - start_time > time_limit:
screen.error("Failed due to exceeding time limit", crash=False)
if verbose:
screen.error("command exitcode: {}".format(p.returncode), crash=False)
screen.error(open(log_file_name).read(), crash=False)
elif csv_paths:
screen.error("Failed due to insufficient reward", crash=False)
if verbose:
screen.error("command exitcode: {}".format(p.returncode), crash=False)
screen.error(open(log_file_name).read(), crash=False)
screen.error("preset_validation_params.max_episodes_to_achieve_reward: {}".format(
preset_validation_params.max_episodes_to_achieve_reward), crash=False)
screen.error("preset_validation_params.min_reward_threshold: {}".format(
preset_validation_params.min_reward_threshold), crash=False)
screen.error("averaged_rewards: {}".format(averaged_rewards), crash=False)
screen.error("episode number: {}".format(csv['Episode #'].values[-1]), crash=False)
screen.error("training iteration: {}".format(csv['Training Iter'].values[-1]), crash=False)
else:
screen.error("csv file never found", crash=False)
if verbose:
screen.error("command exitcode: {}".format(p.returncode), crash=False)
screen.error(open(log_file_name).read(), crash=False)
shutil.rmtree(test_path)
os.remove(log_file_name)
if not test_passed:
raise ValueError('golden test failed')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset', '--presets',
help="(string) Name of preset(s) to run (comma separated, and as configured in presets.py)",
default=None,
type=str)
parser.add_argument('-ip', '--ignore_presets',
help="(string) Name of preset(s) to ignore (comma separated, and as configured in presets.py)",
default=None,
type=str)
parser.add_argument('-v', '--verbose',
help="(flag) display verbose logs in the event of an error",
action='store_true')
parser.add_argument('--stop_after_first_failure',
help="(flag) stop executing tests after the first error",
action='store_true')
parser.add_argument('-tl', '--time_limit',
help="time limit for each test in minutes",
default=60, # setting time limit to be so high due to DDPG being very slow - its tests are long
type=int)
parser.add_argument('-np', '--no_progress_bar',
help="(flag) Don't print the progress bar (makes jenkins logs more readable)",
action='store_true')
args = parser.parse_args()
if args.preset is not None:
presets_lists = args.preset.split(',')
else:
presets_lists = all_presets()
fail_count = 0
test_count = 0
args.time_limit = 60 * args.time_limit
if args.ignore_presets is not None:
presets_to_ignore = args.ignore_presets.split(',')
else:
presets_to_ignore = []
for idx, preset_name in enumerate(sorted(presets_lists)):
if args.stop_after_first_failure and fail_count > 0:
break
if preset_name not in presets_to_ignore:
print("Attempting to run Preset: %s" % preset_name)
if not importable(preset_name):
screen.error("Failed to load preset <{}>".format(preset_name), crash=False)
fail_count += 1
test_count += 1
continue
if not has_test_parameters(preset_name):
continue
test_count += 1
try:
test_preset_reward(preset_name, args.no_progress_bar, args.time_limit, args.verbose)
except Exception as e:
fail_count += 1
screen.separator()
if fail_count == 0:
screen.success(" Summary: " + str(test_count) + "/" + str(test_count) + " tests passed successfully")
else:
screen.error(" Summary: " + str(test_count - fail_count) + "/" + str(test_count) + " tests passed successfully")
if __name__ == '__main__':
main()
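# Editor-added usage note (the preset name below is a placeholder, not a preset
# shipped with this file):
#   python3 test_golden.py -p <PresetName> -tl 30 -v
# The same checks can also be selected through pytest via the marker used above,
# i.e. `pytest -m golden_test`.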
|
8bcd2021c1d3a692a0be3f40d2e919bf4914345c
|
438f82adbaa27bcb97cce0171f377ddc92586f48
|
/src/python/python-proto/python_proto/tests/test_common.py
|
891a44ef6c1516c9fb2087f585281c53d27a117c
|
[
"Apache-2.0"
] |
permissive
|
grapl-security/grapl
|
5f93599969ec604df25712c1d16648d16de67072
|
b2c7ef263fb8134add2febb770da164ea7b4936f
|
refs/heads/main
| 2023-08-12T11:38:11.167343
| 2022-12-26T15:28:55
| 2022-12-26T15:28:55
| 151,994,099
| 386
| 60
|
Apache-2.0
| 2022-12-10T05:56:55
| 2018-10-07T23:28:27
|
Rust
|
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
test_common.py
|
import datetime
import uuid
import pytest
pytest.register_assert_rewrite("python_proto.tests.helpers")
from hypothesis import given
from hypothesis import strategies as st
from python_proto.common import Duration, Timestamp, Uuid
from python_proto.tests.helpers import check_encode_decode_invariant
from python_proto.tests.strategies import durations, timestamps, uuids
def test_uuid_encode_decode() -> None:
check_encode_decode_invariant(uuids())
@given(st.uuids())
def test_uuid_from_into(uuid_: uuid.UUID) -> None:
assert Uuid.from_uuid(uuid_).into_uuid() == uuid_
def test_timestamp_encode_decode() -> None:
check_encode_decode_invariant(timestamps())
@given(st.datetimes())
def test_timestamp_from_into(datetime_: datetime.datetime) -> None:
assert Timestamp.from_datetime(datetime_=datetime_).into_datetime() == datetime_
def test_epoch_timestamp_is_since_variant() -> None:
"""Ensure that when a datetime is exactly
1970-01-01T00:00:00.000000Z it is converted into a
"since_epoch" protobuf Timestamp. We might state this
circumstance in words "it has been 0ms since epoch".
"""
epoch = datetime.datetime.utcfromtimestamp(0)
timestamp = Timestamp.from_datetime(epoch)
proto_timestamp = timestamp.into_proto()
assert proto_timestamp.WhichOneof("duration") is not None
assert proto_timestamp.WhichOneof("duration") == "since_epoch"
def test_duration_encode_decode() -> None:
check_encode_decode_invariant(durations())
@given(st.timedeltas(min_value=datetime.timedelta(days=0)))
def test_duration_from_into(timedelta: datetime.timedelta) -> None:
assert Duration.from_timedelta(timedelta=timedelta).into_timedelta() == timedelta
|
6645a4a06f7b4661ad1652a0a342c03b40ef8be9
|
67ce6a1d1369463b15023cc5bd1be9e823bab398
|
/lib/pymedphys/tests/mosaiq/test_patient_name_fields.py
|
2870e8a084e27b605f8b070a14cc2152d83bfb22
|
[
"Apache-2.0"
] |
permissive
|
pymedphys/pymedphys
|
2487efe7259cc4e226e93d32fe86cef01673016e
|
f6acdf9bd2e8a32e372966879284fbd71c612358
|
refs/heads/main
| 2023-08-05T06:27:48.110296
| 2023-06-07T18:22:09
| 2023-06-07T18:22:09
| 168,238,552
| 288
| 79
|
Apache-2.0
| 2023-05-30T03:23:50
| 2019-01-29T22:20:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
test_patient_name_fields.py
|
# Copyright (C) 2021 Derek Lane, Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymedphys._imports import pytest
import pymedphys
from pymedphys._mosaiq import delivery, helpers
from . import _connect
from .data import mocks
@pytest.fixture(name="connection")
def fixture_check_create_test_db() -> pymedphys.mosaiq.Connection:
"""will create the test database, if it does not already exist on the instance"""
mocks.check_create_test_db()
return _connect.connect()
@pytest.mark.mosaiqdb
def test_get_patient_name(connection: pymedphys.mosaiq.Connection):
"""tests the get_patient_name helper function"""
mocks.create_mock_patients()
# test a generic query for patient info
result_all = pymedphys.mosaiq.execute(
connection,
"""
SELECT
Pat_Id1,
First_Name,
Last_Name
FROM Patient
""",
)
# show us the patients
for patient in result_all:
pat_id1, first_name, last_name = patient
print(f"Pat_ID1:{pat_id1} First Name:{first_name} Last Name:{last_name}")
# and check that the correct number were created
assert len(result_all) == 3
# test the get_patient_name helper function
moe_patient_name = helpers.get_patient_name(connection, "MR8002")
# finally spot check Moe
assert moe_patient_name == "HOWARD, Moe"
@pytest.mark.mosaiqdb
def test_get_patient_fields(connection: pymedphys.mosaiq.Connection):
"""creates basic tx field and site metadata for the mock patients"""
# the create_mock_patients output is the patient_ident dataframe
mock_patient_ident_df = mocks.create_mock_patients()
mock_site_df = mocks.create_mock_treatment_sites(mock_patient_ident_df)
mocks.create_mock_treatment_fields(mock_site_df)
# test the get_patient_fields helper function
fields_for_moe_df = helpers.get_patient_fields(connection, "MR8002")
print(fields_for_moe_df)
# make sure the correct number of rows were returned
    # with the rng seed, there are 3 fields created for moe
field_count = 3
assert len(fields_for_moe_df) == field_count
# for each treatment field
for _, txfield in fields_for_moe_df.iterrows():
field_id = txfield["field_id"]
# check that the field label matches the field name
assert f"Field{txfield['field_label']}" == txfield["field_name"]
# check for txfield control points
total_mu, point_results = delivery.delivery_data_sql(connection, field_id)
assert total_mu == 100
print(point_results)
# iterate over the txfield results and see if they match
current_index = 0.0
for _, tx_point in point_results.iterrows():
assert tx_point[0] >= current_index
current_index = tx_point[0]
|
5c4c6326f09c0be653632a68dc4afc5c016cd949
|
3d144a23e67c839a4df1c073c6a2c842508f16b2
|
/utils/build_swift/build_swift/defaults.py
|
ce2fe7a4f0f0457d0d0c7d6b2c10ce7687642523
|
[
"Apache-2.0",
"Swift-exception"
] |
permissive
|
apple/swift
|
c2724e388959f6623cf6e4ad6dc1cdd875fd0592
|
98ada1b200a43d090311b72eb45fe8ecebc97f81
|
refs/heads/main
| 2023-08-16T10:48:25.985330
| 2023-08-16T09:00:42
| 2023-08-16T09:00:42
| 44,838,949
| 78,897
| 15,074
|
Apache-2.0
| 2023-09-14T21:19:23
| 2015-10-23T21:15:07
|
C++
|
UTF-8
|
Python
| false
| false
| 3,805
|
py
|
defaults.py
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
"""
Default option value definitions.
"""
import os
import platform
from . import shell
from .versions import Version
__all__ = [
# Command line configurable
'BUILD_VARIANT',
'CMAKE_GENERATOR',
'COMPILER_VENDOR',
'SWIFT_USER_VISIBLE_VERSION',
'CLANG_USER_VISIBLE_VERSION',
'SWIFT_ANALYZE_CODE_COVERAGE',
'DARWIN_XCRUN_TOOLCHAIN',
'DARWIN_DEPLOYMENT_VERSION_OSX',
'DARWIN_DEPLOYMENT_VERSION_IOS',
'DARWIN_DEPLOYMENT_VERSION_TVOS',
'DARWIN_DEPLOYMENT_VERSION_WATCHOS',
'UNIX_INSTALL_PREFIX',
'DARWIN_INSTALL_PREFIX',
'LLVM_MAX_PARALLEL_LTO_LINK_JOBS',
'SWIFT_MAX_PARALLEL_LTO_LINK_JOBS',
'DSYMUTIL_JOBS'
# Constants
]
# Options that can be "configured" by command line options
BUILD_VARIANT = 'Debug'
CMAKE_GENERATOR = 'Ninja'
COMPILER_VENDOR = 'none'
SWIFT_USER_VISIBLE_VERSION = Version('5.9')
CLANG_USER_VISIBLE_VERSION = Version('13.0.0')
SWIFT_ANALYZE_CODE_COVERAGE = 'false'
DARWIN_XCRUN_TOOLCHAIN = 'default'
DARWIN_DEPLOYMENT_VERSION_OSX = '10.13'
DARWIN_DEPLOYMENT_VERSION_IOS = '11.0'
DARWIN_DEPLOYMENT_VERSION_TVOS = '11.0'
DARWIN_DEPLOYMENT_VERSION_WATCHOS = '4.0'
UNIX_INSTALL_PREFIX = '/usr'
DARWIN_INSTALL_PREFIX = ('/Applications/Xcode.app/Contents/Developer/'
'Toolchains/XcodeDefault.xctoolchain/usr')
DSYMUTIL_JOBS = 1
def _system_memory():
"""Returns the system memory as an int. None if the system memory cannot
be determined.
TODO: Support Linux and Windows platforms.
"""
    # platform.system() (not platform.platform()) returns 'Darwin' on macOS
    if platform.system() == 'Darwin':
try:
output = shell.check_output(['sysctl', 'hw.memsize']).strip()
return int(output.split(' ')[1])
except shell.CalledProcessError:
return None
return None
def _default_llvm_lto_link_jobs():
"""Use the formula (GB Memory - 3)/6.0GB to get the number of parallel
link threads we can support. This gives the OS 3 GB of room to work with.
This is a bit conservative, but I have found that this heuristic prevents
me from swapping on my test machine.
"""
memory = _system_memory()
if memory is None:
return None
return int((memory / 1000000000.0 - 3.0) / 6.0)
def _default_swift_lto_link_jobs():
"""Use the formula (GB Memory - 3)/8.0GB to get the number of parallel
link threads we can support. This gives the OS 3 GB of room to work with.
This is a bit conservative, but I have found that this heuristic prevents
me from swapping on my test machine.
"""
memory = _system_memory()
if memory is None:
return None
return int((memory / 1000000000.0 - 3.0) / 8.0)
LLVM_MAX_PARALLEL_LTO_LINK_JOBS = _default_llvm_lto_link_jobs()
SWIFT_MAX_PARALLEL_LTO_LINK_JOBS = _default_swift_lto_link_jobs()
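# Editor-added worked example of the heuristics above (illustrative only): on a
# hypothetical machine reporting 36 GB of memory,
#   LLVM LTO link jobs  = int((36 - 3) / 6.0) = 5
#   Swift LTO link jobs = int((36 - 3) / 8.0) = 4
# When _system_memory() returns None (non-Darwin platforms), both constants stay None.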
def llvm_install_components():
"""Convenience function for getting the default llvm install components for
platforms.
"""
components = ['llvm-ar', 'llvm-cov', 'llvm-profdata', 'IndexStore', 'clang',
'clang-resource-headers', 'compiler-rt', 'clangd', 'LTO']
if os.sys.platform == 'darwin':
components.extend(['dsymutil'])
else:
components.extend(['lld'])
return ';'.join(components)
# Options that can only be "configured" by editing this file.
#
# These options are not exposed as command line options on purpose. If you
# need to change any of these, you should do so on trunk or in a branch.
|
8f0b9d22265cd4e88158a39615fc484c718bffa6
|
f3f4fc4e7844d36488e6b572b8e2f6082f644044
|
/mapchete/cli/default/processes.py
|
23a897a3b94d96036571a938544bf6772319985f
|
[
"MIT"
] |
permissive
|
ungarj/mapchete
|
06278d9dc6937530e5095b16b81628d74d7163f9
|
6c5cf24059dce87bc60df6e487519a3036bf0669
|
refs/heads/main
| 2023-09-02T13:39:44.202901
| 2023-08-27T12:59:08
| 2023-08-27T12:59:08
| 39,515,694
| 192
| 25
|
MIT
| 2023-09-05T10:46:16
| 2015-07-22T15:59:22
|
Python
|
UTF-8
|
Python
| false
| false
| 989
|
py
|
processes.py
|
"""CLI to list processes."""
import logging
import click
from mapchete.cli import options
from mapchete.processes import process_names_docstrings
logger = logging.getLogger(__name__)
@click.command(help="List available processes.")
@click.option(
"--process_name", "-n", type=click.STRING, help="Print docstring of process."
)
@options.opt_debug
def processes(process_name=None, docstrings=False, debug=False):
"""List available processes."""
processes = process_names_docstrings(process_name=process_name)
click.echo("%s processes found" % len(processes))
for process_info in processes:
_print_process_info(process_info, print_docstring=process_name is not None)
def _print_process_info(process_info, print_docstring=False):
name, docstring = process_info
# print process name
click.echo(click.style(name, bold=print_docstring, underline=print_docstring))
# print process docstring
if print_docstring:
click.echo(docstring)
|
c3ecb487ec2048d641fecbf33b04b5ca0551c8ad
|
a5622dafafd782af153be2bc0bd19cb086fd07b2
|
/tests/integration_tests/resources/dsl/agent_tests/plugins/sourceplugin/setup.py
|
a5befc6266a5218d877d74cc49cfc1f90a3d9e72
|
[
"Apache-2.0"
] |
permissive
|
cloudify-cosmo/cloudify-manager
|
8b2d226ad5a9dd8103d7690b2f8081bef24078e1
|
c0de6442e1d7653fad824d75e571802a74eee605
|
refs/heads/master
| 2023-09-06T09:11:51.753912
| 2023-09-04T08:01:58
| 2023-09-04T08:01:58
| 18,326,574
| 146
| 84
|
Apache-2.0
| 2023-09-04T08:02:00
| 2014-04-01T11:06:47
|
Python
|
UTF-8
|
Python
| false
| false
| 79
|
py
|
setup.py
|
from setuptools import setup
setup(name='sourceplugin', py_modules=['plug1'])
|
fcce13afb656ddb37f88b957893f5156c81c4b0c
|
3684dc66398a1920dae24b9f0d855579794378ac
|
/loglizer_demo.py
|
e6afc1ec8adc028579562112211ab53bf82fa502
|
[
"MIT"
] |
permissive
|
logpai/loglizer
|
ae5dbd308fdf8555db1362eead2d290de0e3462d
|
ac67f9727acb660687a77b9c8042553aaa185cd3
|
refs/heads/master
| 2023-07-30T06:29:02.977618
| 2023-03-27T11:32:46
| 2023-03-27T11:32:46
| 58,811,148
| 1,138
| 420
|
MIT
| 2023-03-20T02:12:08
| 2016-05-14T14:08:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,144
|
py
|
loglizer_demo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import pandas as pd
from loglizer.models import PCA, IsolationForest, DecisionTree, LR
from loglizer import dataloader, preprocessing
import pickle
from tqdm import tqdm
from sklearn.ensemble import IsolationForest as iForest
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
import time
import tracemalloc
def get_x_y(windows, content2template):
x = []
y = []
for window in tqdm(windows):
template_list = []
y_list = []
for item in window:
            template = content2template.get(item["Content"], "")
template_list.append(template)
y_list.append(item["Label"])
x.append(template_list)
y.append(1 if sum(y_list) > 0 else 0)
return x, y
model_name = "if"
dataname = "BGL"
if __name__ == "__main__":
config_info = {
"BGL": {
"structure_file": "../../data/BGL/BGL.log_structured.csv",
"pkl_path": "../../proceeded_data/BGL/BGL_ws=60m_s=60m_0.8train",
},
"Thunderbird": {
"structure_file": "../../src/Drain_result/Thunderbird.log_structured.csv",
"pkl_path": "../../proceeded_data/Thunderbird/Thunderbird_ws=60m_s=60m_0.8train",
},
}
structure_file = config_info[dataname]["structure_file"]
pkl_path = config_info[dataname]["pkl_path"]
parsed_result = pd.read_csv(structure_file)
    content2template = dict(zip(parsed_result["Content"], parsed_result["EventTemplate"]))
with open(os.path.join(pkl_path, "train.pkl"), "rb") as fr:
train_windows = pickle.load(fr)[0:1]
with open(os.path.join(pkl_path, "test.pkl"), "rb") as fr:
test_windows = pickle.load(fr)[0:1]
    train_x, train_y = get_x_y(train_windows, content2template)
    test_x, test_y = get_x_y(test_windows, content2template)
    del parsed_result, content2template
feature_extractor = preprocessing.FeatureExtractor()
if model_name.lower() == "if":
model = iForest(n_estimators=100, max_features=1)
s1 = time.time()
train_feat = feature_extractor.fit_transform(np.array(train_x), term_weighting='tf-idf',
normalization='zero-mean')
model.fit(train_feat)
s2 = time.time()
pred_train = model.decision_function(train_feat)
proba_train = (pred_train-pred_train.min()) / (pred_train.max() - pred_train.min())
s3 = time.time()
test_feat = feature_extractor.transform(np.array(test_x))
pred_test = model.decision_function(test_feat)
s4 = time.time()
proba_test = (pred_test-pred_test.min()) / (pred_test.max() - pred_test.min())
elif model_name.lower() == "dt":
s1 = time.time()
train_feat = feature_extractor.fit_transform(np.array(train_x), term_weighting='tf-idf',
normalization='zero-mean')
model = DecisionTree()
model.fit(train_feat, train_y)
s2 = time.time()
proba_train = model.predict_proba(train_feat)[:, 1]
s3 = time.time()
test_feat = feature_extractor.transform(np.array(test_x))
proba_test = model.predict_proba(test_feat)[:, 1]
s4 = time.time()
elif model_name.lower() == "lr":
s1 = time.time()
train_feat = feature_extractor.fit_transform(np.array(train_x), term_weighting='tf-idf',
normalization='zero-mean')
model = LR()
model.fit(train_feat, train_y)
s2 = time.time()
proba_train = model.predict_proba(train_feat)[:, 1]
s3 = time.time()
test_feat = feature_extractor.transform(np.array(test_x))
proba_test = model.predict_proba(test_feat)[:, 1]
s4 = time.time()
print("Training time for {}: {:.3f}".format(model_name, s2-s1))
print("Testing time for {}: {:.3f}".format(model_name, s4-s3))
# print(f"Peak memory usage: {tracemalloc.get_traced_memory()[1] / (1024*1024):.2f} MB")
|
5de94052cad44d1e457ee66600673c795fc5fc34
|
dcd772f567ef8a8a1173a9f437cd68f211fb9362
|
/ravenframework/utils/cached_ndarray.py
|
c79941cae30ef36f63eae6cc172a76029943e1f5
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
idaholab/raven
|
39cdce98ad916c638399232cdc01a9be00e200a2
|
2b16e7aa3325fe84cab2477947a951414c635381
|
refs/heads/devel
| 2023-08-31T08:40:16.653099
| 2023-08-29T16:21:51
| 2023-08-29T16:21:51
| 85,989,537
| 201
| 126
|
Apache-2.0
| 2023-09-13T21:55:43
| 2017-03-23T19:29:27
|
C++
|
UTF-8
|
Python
| false
| false
| 14,621
|
py
|
cached_ndarray.py
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Feb 4, 2015
@author: alfoa
"""
#----- python 2 - 3 compatibility
from __future__ import division, print_function, absolute_import
#----- end python 2 - 3 compatibility
#External Modules------------------------------------------------------------------------------------
import sys
import threading
from numpy import ndarray
import numpy as np
lock = threading.Lock()
#External Modules End--------------------------------------------------------------------------------
class c1darray(object):
"""
This class performs the caching of the numpy ndarray class
"""
def __init__(self, shape = (100,), values = None, dtype=float, buff=None, offset=0, strides=None, order=None):
"""
Constructor
@ In, shape, tuple, optional, array shape
@ In, values, numpy.ndarray, optional, array through which this c1darray needs to be initialized
@ In, dtype, np.type, optional, the data type of this array
@ In, buff, int, optional, buffer size
      @ In, offset, int, optional, array offset
@ In, strides, object, optional, strides (see numpy)
@ In, order, string, optional, array ordering (fortran, c, etc) (see numpy)
@ Out, None
"""
if values is not None:
if shape != (100,) and values.shape != shape:
raise IOError("different shape")
if type(values).__name__ != 'ndarray':
raise IOError("Only ndarray is accepted as type.Got "+type(values).__name__)
self.values = values
self.size = values.size
else:
self.values = ndarray(shape, dtype, buff, offset, strides, order)
self.size = 0
try:
self.capacity = self.values.shape[0]
except IndexError:
self.capacity = []
self.ndim = self.values.ndim
def __iter__(self):
"""
Overload of iterator
@ In, None
@ Out, __iter__, iterator, iterator
"""
return self.values[:self.size].__iter__()
def __getitem__(self, val):
"""
Get item method. slicing available:
example 1. c1darrayInstance[5], is going to return the 6th element in the array
example 2. c1darrayInstance[1:3], is going to return an array composed by the 2nd,3rd,4th elements
@ In, val, slice object, the slicing object (e.g. 1, :, :2, 1:3, etc.)
@ Out, __getitem__, array slicing, the element or the slicing
"""
return self.values[:self.size].__getitem__(val)
def __len__(self):
"""
Return size
@ In, None
@ Out, self.size, integer, size
"""
return self.size
def append(self, x):
"""
Append method. call format c1darrayInstance.append(value)
@ In, x, element or array, the value or array to append
@ Out, None (appending in place)
"""
#lock.acquire()
try:
if type(x).__name__ not in ['ndarray', 'c1darray']:
if self.size == self.capacity:
self.capacity *= 4
newdata = np.zeros((self.capacity,),dtype=self.values.dtype)
newdata[:self.size] = self.values[:]
self.values = newdata
self.values[self.size] = x
self.size += 1
else:
if (self.capacity - self.size) < x.size:
# to be safer
self.capacity += max(self.capacity*4, x.size) #self.capacity + x.size*4
newdata = np.zeros((self.capacity,),dtype=self.values.dtype)
newdata[:self.size] = self.values[:self.size]
self.values = newdata
#for index in range(x.size):
self.values[self.size:self.size+x.size] = x[:]
self.size += x.size
finally:
#lock.release()
pass
def returnIndexClosest(self,value):
"""
Function that return the index of the element in the array closest to value
@ In, value, double, query value
@ Out, index, int, index of the element in the array closest to value
"""
index=-1
dist = sys.float_info.max
for i in range(self.size):
if abs(self.values[i]-value)<dist:
dist = abs(self.values[i]-value)
index = i
return index
def returnIndexFirstPassage(self,value):
"""
Function that return the index of the element that firstly crosses value
@ In, value, double, query value
@ Out, index, int, index of the element in the array closest to value
"""
index=-1
dist = sys.float_info.max
for i in range(1,self.size):
if (self.values[i]>=value and self.values[i-1]<=value) or (self.values[i]<=value and self.values[i-1]>=value):
index = i
break
return index
def returnIndexMax(self):
"""
Function that returns the index (i.e. the location) of the maximum value of the array
@ In, None
@ Out, index, int, index of the maximum value of the array
"""
index=-1
maxValue = -sys.float_info.max
for i in range(self.size):
if self.values[i]>=maxValue:
maxValue = self.values[i]
index = i
#break Breaking here guarantees you only ever get the first index (unless you have -sys.float_info_max in first entry)
return index
def returnIndexMin(self):
"""
Function that returns the index (i.e. the location) of the minimum value of the array
@ In, None ,
@ Out, index, int, index of the minimum value of the array
"""
index=-1
minValue = sys.float_info.max
for i in range(self.size):
if self.values[i]<=minValue:
minValue = self.values[i]
index = i
#break Breaking here guarantees you only ever get the first index (unless you have sys.float_info_max in first entry)
return index
def __add__(self, x):
"""
Method to mimic the addition of two arrays
@ In, x, c1darray, the addendum
@ Out, newArray, c1drray, sum of the two arrays
"""
newArray = c1darray(shape = self.size+np.array(x).shape[0], values=self.values[:self.size]+np.array(x))
return newArray
def __radd__(self, x):
"""
reversed-order (LHS <-> RHS) addition
Method to mimic the addition of two arrays
@ In, x, c1darray, the addendum
@ Out, newArray, c1drray, sum of the two arrays
"""
newArray = c1darray(shape = np.array(x).shape[0]+self.size, values=np.array(x)+self.values[:self.size])
return newArray
def __array__(self, dtype = None):
"""
so that numpy's array() returns values
@ In, dtype, np.type, the requested type of the array
@ Out, __array__, numpy.ndarray, the requested array
"""
if dtype != None:
return ndarray((self.size,), dtype, buffer=None, offset=0, strides=None, order=None)
else :
return self.values[:self.size]
def __repr__(self):
"""
overload of __repr__ function
@ In, None
@ Out, __repr__, string, the representation string
"""
return repr(self.values[:self.size])
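# Editor-added usage sketch for c1darray (comments only, illustrative):
#   arr = c1darray()                   # cached 1-D float array, initial capacity 100
#   arr.append(3.0)                    # scalar append
#   arr.append(np.array([1.0, 2.0]))   # bulk append from an ndarray
#   arr.returnIndexClosest(1.9)        # -> 2, since the stored values are [3.0, 1.0, 2.0]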
#
#
#
#
class cNDarray(object):
"""
Higher-dimension caching of numpy arrays. Might include c1darray as a subset if designed right.
DEV NOTE:
When redesigning the DataObjects in RAVEN in 2017, we tried a wide variety of libraries, strategies,
and data structures. For appending one realization (with N entities) at a time, the np.ndarray proved
most efficient for dropping in values, particularly when cached as per this class. Restructuring the data
into a more useful form (e.g. xarray.Dataset) should be accomplished in the DataObject; this is just a collecting
structure. - talbpw, 2017-10-20
"""
### CONSTRUCTOR ###
def __init__(self,values=None,width=None,length=None,dtype=float,buff=None,offset=0,strides=None,order=None):
"""
Constructor.
@ In, values, np.ndarray, optional, matrix of initial values with shape (# samples, # entities)
@ In, width, int, optional, if not using "values" then this is the number of entities to allocate
@ In, length, int, optional, if not using "values" then this is the initial capacity (number of samples) to allocate
@ In, dtype, type, optional, sets the type of the content of the array
@ In, buff, int, optional, buffer size
      @ In, offset, int, optional, array offset
@ In, strides, object, optional, strides (see docs for np.ndarray)
@ In, order, string, optional, array ordering (fortran, c, etc) (see docs for np.ndarray)
@ Out, None
"""
# members of this class
self.values = None # underlying data for this structure, np.ndarray with optional dtype (default float)
self.size = None # number of rows (samples) with actual data (not including empty cached)
self.width = None # number of entities aka columns
self.capacity = None # cached np.ndarray size
# priorities: initialize with values; if not, use width and length
if values is not None:
if type(values) != np.ndarray:
raise IOError('Only np.ndarray can be used to set "values" in "cNDarray". Got '+type(values).__name__)
self.values = values # underlying data structure
self.size = values.shape[0]
try:
self.width = values.shape[1]
except IndexError:
## TODO NEEDS TO BE DEPRECATED should always have a width, in real usage
self.width = 0
# if setting by value, initialize capacity to existing data length
self.capacity = self.size
else:
if width is None:
raise IOError('Creating cNDarray: neither "values" nor "width" was specified!')
self.capacity = length if length is not None else 100
self.width = width
self.size = 0
self.values = ndarray((self.capacity,self.width),dtype,buff,offset,strides,order)
### PROPERTIES ###
@property
def shape(self):
"""
Shape property, as used in np.ndarray structures.
@ In, None
@ Out, (int,int), the (#rows, #columns) of useful data in this cached array
"""
return (self.size,self.width)
### BUILTINS ###
def __array__(self, dtype = None):
"""
so that numpy's array() returns values
@ In, dtype, np.type, the requested type of the array
@ Out, __array__, numpy.ndarray, the requested array
"""
if dtype != None:
return ndarray((self.size,self.width), dtype, buffer=None, offset=0, strides=None, order=None)
else:
return self.getData()
def __getitem__(self,val):
"""
Get item method. Slicing should work as expected.
@ In, val, slice object, the slicing object (e.g. 1, :, :2, 1:3, etc.)
@ Out, __getitem__, np.ndarray, the element(s)
"""
return self.values[:self.size].__getitem__(val)
def __iter__(self):
"""
Overload of iterator
@ In, None
@ Out, __iter__, iterator, iterator
"""
return self.values[:self.size].__iter__()
def __len__(self):
"""
Return size, which is the number of samples, independent of entities, containing useful data.
Does not include cached entries that have not yet been filled.
@ In, None
@ Out, __len__, integer, size
"""
return self.size
def __repr__(self):
"""
overload of __repr__ function
@ In, None
@ Out, __repr__, string, the representation string
"""
return repr(self.values[:self.size])
### UTILITY FUNCTIONS ###
def append(self,entry):
"""
Append method. call format c1darrayInstance.append(value)
@ In, entry, np.ndarray, the entries to append as [entry, entry, entry]. Must have shape (x, # entities), where x can be any nonzero number of samples.
@ Out, None
"""
# TODO extend to include sending in a (width,) shape np.ndarray to append a single sample, rather than have it forced to be a 1-entry array.
# entry.shape[0] is the number of new entries, entry.shape[1] is the number of variables being entered
# entry must match width and be at least 1 entry long
if type(entry) not in [np.ndarray]:
raise IOError('Tried to add new data to cNDarray. Can only accept np.ndarray, but got '+type(entry).__name__)
# for now require full correct shape, later handle the single entry case
if len(entry.shape)!=1:
# TODO single entry case
raise IOError('Tried to add new data to cNDarray. Need shape ({},) but got "{}"!'.format(self.width,entry.shape))
# must have matching width (fix for single entry case)
if entry.shape[0] != self.width:
raise IOError('Tried to add new data to cNDarray. Need {} entries in array, but got '.format(self.width)+str(entry.shape[0]))
# check if there's enough space in cache to append the new entries
if self.size + 1 > self.capacity:
# since there's not enough space, quadruple available space # TODO change growth parameter to be variable?
self.capacity += self.capacity*3
newdata = np.zeros((self.capacity,self.width),dtype=self.values.dtype)
newdata[:self.size] = self.values[:self.size]
self.values = newdata
self.values[self.size] = entry[:]
self.size += 1
def addEntity(self,vals,firstEver=False):
"""
Adds a column to the dataset.
@ In, vals, list, as list(#,#,#) where # is either single-valued or numpy array
@ Out, None
"""
# create a new column with up to the cached capacity
new = np.ndarray(self.capacity,dtype=object)
# fill up to current filled size with the values
new[:self.size] = vals
# reshape so it can be stacked onto the existing data
new = new.reshape(self.capacity,1)
# "hstack" stacks along the second dimension, or columns for us
self.values = np.hstack((self.values,new))
self.width += 1
def getData(self):
"""
Returns the underlying data structure.
@ In, None
@ Out, getData, np.ndarray, underlying data up to the used size
"""
return self.values[:self.size]
def removeEntity(self,index):
"""
Removes a column from this dataset
@ In, index, int, index of entry to remove
@ Out, None
"""
assert(abs(index) < self.width)
self.values = np.delete(self.values,index,axis=1)
self.width -= 1
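# Editor-added, minimal usage sketch (illustrative only; not part of RAVEN).
if __name__ == '__main__':
  # Cache three 2-entity realizations one append at a time, then read the
  # filled block back without the unused cached capacity.
  demo = cNDarray(width=2, length=2)
  for i in range(3):
    demo.append(np.array([float(i), float(i) * 10.0]))
  print(demo.shape)      # -> (3, 2)
  print(demo.getData())  # only the filled rows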
|
536afefc8fb293eed56b78f3fc2789b35c99a18f
|
e88c2055b62e2b2a61b3a3e25d9ef1472996b92a
|
/doc/conf.py
|
1339835e3b1dc0a384a641ffddbf213a477bd5d7
|
[
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause"
] |
permissive
|
holoviz/holoviz
|
f4eb8156c25b068e2eaa10d0dfd6d4ef17d73e6d
|
8942c5c1fbe399e9693a51b730ea04246ce593d6
|
refs/heads/main
| 2023-08-09T13:15:36.008920
| 2023-08-02T13:39:08
| 2023-08-02T13:39:08
| 104,527,599
| 369
| 52
|
BSD-3-Clause
| 2023-08-28T07:12:24
| 2017-09-22T23:48:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
conf.py
|
# -*- coding: utf-8 -*-
from nbsite.shared_conf import *
project = "HoloViz"
authors = 'HoloViz authors'
copyright_years['start_year'] = '2017'
copyright = copyright_fmt.format(**copyright_years)
description = 'High-level tools to simplify visualization in Python.'
import holoviz
version = release = base_version(holoviz.__version__)
html_static_path += ['_static']
html_css_files = [
'nbsite.css',
'css/custom.css'
]
html_theme = "pydata_sphinx_theme"
html_logo = '_static/holoviz-logo-unstacked.svg'
html_favicon = "_static/favicon.ico"
html_theme_options.update({
"github_url": "https://github.com/holoviz/holoviz",
"icon_links": [
{
"name": "Twitter",
"url": "https://twitter.com/HoloViz_Org",
"icon": "fab fa-twitter-square",
},
{
"name": "Discourse",
"url": "https://discourse.holoviz.org/",
"icon": "fab fa-discourse",
},
]
})
templates_path += ['_templates']
html_context.update({
# Used to add binder links to the latest released tag.
'last_release': f'v{release}',
'github_user': 'holoviz',
'github_repo': 'holoviz',
})
|
71a133a7648e37990b7b10e7690522f4f4b13737
|
4e2117a4381f65e7f2bb2b06da800f40dc98fa12
|
/103_EfficientDet_lite/test.py
|
003dd437b4e02dfa63d88cca85b4e844f9fa8696
|
[
"AGPL-3.0-only",
"LicenseRef-scancode-proprietary-license",
"MIT",
"CC-BY-3.0"
] |
permissive
|
PINTO0309/PINTO_model_zoo
|
84f995247afbeda2543b5424d5e0a14a70b8d1f1
|
ff08e6e8ab095d98e96fc4a136ad5cbccc75fcf9
|
refs/heads/main
| 2023-09-04T05:27:31.040946
| 2023-08-31T23:24:30
| 2023-08-31T23:24:30
| 227,367,327
| 2,849
| 520
|
MIT
| 2023-08-31T23:24:31
| 2019-12-11T13:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 5,375
|
py
|
test.py
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#===============================================================================================
import tensorflow_hub as hub
import numpy as np
detector = hub.load("https://tfhub.dev/tensorflow/efficientdet/lite0/detection/1")
from PIL import Image
images = [np.array(Image.open('dog_320x320.jpg'))]
boxes, scores, classes, num_detections = detector(images)
print('===================================== tfhub')
print(boxes)
print(scores)
print(classes)
print(num_detections)
#===============================================================================================
LABELS = [
'person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'backpack',
'umbrella',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'dining table',
'toilet',
'tv',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush'
]
import cv2
import pprint
from openvino.inference_engine import IECore
ie = IECore()
model = 'openvino/FP16/model'
net = ie.read_network(model=f'{model}.xml', weights=f'{model}.bin')
exec_net = ie.load_network(network=net, device_name='CPU')
img = cv2.imread('dog.jpg')
# NOTE: img.shape is (rows, cols), so despite the names w holds the image
# height and h the image width; scale_w therefore scales the y axis and
# scale_h the x axis in the rectangle drawing below.
w = int(img.shape[0])
h = int(img.shape[1])
scale_w = w / 320
scale_h = h / 320
resized_frame = cv2.resize(img, (320, 320))
resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB)
resized_frame = resized_frame[np.newaxis,:,:,:].transpose((0,3,1,2))
print('@@@@@@@@@@@@@@@@@@@@@ resized_frame.shape', resized_frame.shape)
outputs = exec_net.infer(inputs={'serving_default_images:0': resized_frame})
print('===================================== openvino')
# pprint.pprint(outputs)
print(outputs['StatefulPartitionedCall:3'])
print(outputs['StatefulPartitionedCall:3'] * 320)
bboxes = outputs['StatefulPartitionedCall:3'] * 320
bboxes = np.where(bboxes < 0.0, 0.0, bboxes)
print('Slice__1691/Split.0', outputs['Slice__1691/Split.0'].shape)
print('StatefulPartitionedCall:1', outputs['StatefulPartitionedCall:1'].shape)
print('StatefulPartitionedCall:2', outputs['StatefulPartitionedCall:2'].shape)
print('StatefulPartitionedCall:3', outputs['StatefulPartitionedCall:3'].shape)
# bbox = [ymin, xmin, ymax, xmax]
# draw the three highest-ranked boxes
for box in bboxes[0][:3]:
    cv2.rectangle(img, (int(box[1] * scale_h), int(box[0] * scale_w)), (int(box[3] * scale_h), int(box[2] * scale_w)), (0,255,0), 2, 16)
cv2.imwrite('dog_result.jpg', img)
#===============================================================================================
import tensorflow as tf
import pprint
import os
def structure_print():
print('')
print(f'model: {os.path.basename(model_tflite)}')
print('')
print('==INPUT============================================')
pprint.pprint(interpreter.get_input_details())
print('')
print('==OUTPUT===========================================')
pprint.pprint(interpreter.get_output_details())
model_tflite = 'model_float32.tflite'
interpreter = tf.lite.Interpreter(model_tflite, num_threads=4)
interpreter.allocate_tensors()
structure_print()
in_frame = cv2.resize(img, (320, 320))
in_frame = in_frame.reshape((1, 320, 320, 3))
input_index = interpreter.get_input_details()[0]['index']
interpreter.set_tensor(input_index, in_frame.astype(np.float32))
interpreter.invoke()
bboxes = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])
class_ids = interpreter.get_tensor(interpreter.get_output_details()[1]['index'])
confs = interpreter.get_tensor(interpreter.get_output_details()[2]['index'])
print(bboxes.shape)
print(bboxes)
print(class_ids.shape)
print(class_ids) # We need to add +1 to the index of the result.
print(confs.shape)
print(confs)
# draw the three highest-ranked boxes from the tflite output
for box in bboxes[0][:3]:
    cv2.rectangle(img, (int(box[1] * scale_h), int(box[0] * scale_w)), (int(box[3] * scale_h), int(box[2] * scale_w)), (0,255,0), 2, 16)
cv2.imwrite('dog_result_tflite.jpg', img)
|
c48a356eac6a6c4a51c2addba6f25ba1f1fe408b
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/tests/contrib/redis/test_redis_cluster.py
|
5982701dd321a79a96bf5d4b48f01fc0925ffd20
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,947
|
py
|
test_redis_cluster.py
|
# -*- coding: utf-8 -*-
import pytest
import redis
from ddtrace import Pin
from ddtrace.contrib.redis.patch import patch
from ddtrace.contrib.redis.patch import unpatch
from ddtrace.internal.schema import DEFAULT_SPAN_SERVICE_NAME
from tests.contrib.config import REDISCLUSTER_CONFIG
from tests.utils import DummyTracer
from tests.utils import TracerTestCase
from tests.utils import assert_is_measured
@pytest.mark.skipif(redis.VERSION < (4, 1), reason="redis.cluster is not implemented in redis<4.1")
class TestRedisClusterPatch(TracerTestCase):
TEST_HOST = REDISCLUSTER_CONFIG["host"]
TEST_PORTS = REDISCLUSTER_CONFIG["ports"]
def _get_test_client(self):
startup_nodes = [redis.cluster.ClusterNode(self.TEST_HOST, int(port)) for port in self.TEST_PORTS.split(",")]
return redis.cluster.RedisCluster(startup_nodes=startup_nodes)
def setUp(self):
super(TestRedisClusterPatch, self).setUp()
patch()
r = self._get_test_client()
r.flushall()
Pin.override(r, tracer=self.tracer)
self.r = r
def tearDown(self):
unpatch()
super(TestRedisClusterPatch, self).tearDown()
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1"))
def test_span_service_name_v1(self):
us = self.r.get("cheese")
assert us is None
spans = self.get_spans()
span = spans[0]
assert span.service == DEFAULT_SPAN_SERVICE_NAME
def test_basics(self):
us = self.r.get("cheese")
assert us is None
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
assert_is_measured(span)
assert span.service == "redis"
assert span.name == "redis.command"
assert span.span_type == "redis"
assert span.error == 0
assert span.get_tag("redis.raw_command") == u"GET cheese"
assert span.get_tag("component") == "redis"
assert span.get_tag("db.system") == "redis"
assert span.get_metric("redis.args_length") == 2
assert span.resource == "GET cheese"
def test_unicode(self):
us = self.r.get(u"😐")
assert us is None
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
assert_is_measured(span)
assert span.service == "redis"
assert span.name == "redis.command"
assert span.span_type == "redis"
assert span.error == 0
assert span.get_tag("redis.raw_command") == u"GET 😐"
assert span.get_tag("component") == "redis"
assert span.get_tag("db.system") == "redis"
assert span.get_metric("redis.args_length") == 2
assert span.resource == u"GET 😐"
def test_pipeline(self):
with self.r.pipeline(transaction=False) as p:
p.set("blah", 32)
p.rpush("foo", u"éé")
p.hgetall("xxx")
p.execute()
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
assert_is_measured(span)
assert span.service == "redis"
assert span.name == "redis.command"
assert span.resource == u"SET blah 32\nRPUSH foo éé\nHGETALL xxx"
assert span.span_type == "redis"
assert span.error == 0
assert span.get_tag("redis.raw_command") == u"SET blah 32\nRPUSH foo éé\nHGETALL xxx"
assert span.get_tag("component") == "redis"
assert span.get_metric("redis.pipeline_length") == 3
def test_patch_unpatch(self):
tracer = DummyTracer()
# Test patch idempotence
patch()
patch()
r = self._get_test_client()
Pin.get_from(r).clone(tracer=tracer).onto(r)
r.get("key")
spans = tracer.pop()
assert spans, spans
assert len(spans) == 1
# Test unpatch
unpatch()
r = self._get_test_client()
r.get("key")
spans = tracer.pop()
assert not spans, spans
# Test patch again
patch()
r = self._get_test_client()
Pin.get_from(r).clone(tracer=tracer).onto(r)
r.get("key")
spans = tracer.pop()
assert spans, spans
assert len(spans) == 1
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0"))
def test_user_specified_service_v0(self):
"""
When a user specifies a service for the app
The rediscluster integration should not use it.
"""
# Ensure that the service name was configured
from ddtrace import config
assert config.service == "mysvc"
r = self._get_test_client()
Pin.get_from(r).clone(tracer=self.tracer).onto(r)
r.get("key")
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
assert span.service != "mysvc"
@TracerTestCase.run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1"))
def test_user_specified_service_v1(self):
"""
When a user specifies a service for the app
The rediscluster integration should use it.
"""
# Ensure that the service name was configured
from ddtrace import config
assert config.service == "mysvc"
r = self._get_test_client()
Pin.get_from(r).clone(tracer=self.tracer).onto(r)
r.get("key")
spans = self.get_spans()
assert len(spans) == 1
span = spans[0]
assert span.service == "mysvc"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_REDIS_SERVICE="myrediscluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")
)
def test_env_user_specified_rediscluster_service_v0(self):
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "myrediscluster", span.service
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_REDIS_SERVICE="myrediscluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")
)
def test_env_user_specified_rediscluster_service_v1(self):
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "myrediscluster", span.service
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_SERVICE="app-svc", DD_REDIS_SERVICE="myrediscluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v0")
)
def test_service_precedence_v0(self):
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "myrediscluster"
self.reset()
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_SERVICE="app-svc", DD_REDIS_SERVICE="myrediscluster", DD_TRACE_SPAN_ATTRIBUTE_SCHEMA="v1")
)
def test_service_precedence_v1(self):
self.r.get("cheese")
span = self.get_spans()[0]
assert span.service == "myrediscluster"
self.reset()
|
e386b124970ea37a2a5508a656150d4441c85096
|
915d6cd33ed4293d83a15a2a03bd126a1f03fc97
|
/xknx/io/connection.py
|
448901e8fe7d25a1e49f11628c92484f3ff5b651
|
[
"MIT"
] |
permissive
|
XKNX/xknx
|
5e02e3588ab8b2a4dcd7895b94cd39c2894070a8
|
48d4e31365c15e632b275f0d129cd9f2b2b5717d
|
refs/heads/main
| 2023-09-02T11:18:18.093379
| 2023-08-28T11:06:58
| 2023-08-28T11:06:58
| 51,259,458
| 248
| 131
|
MIT
| 2023-09-11T11:54:55
| 2016-02-07T18:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,402
|
py
|
connection.py
|
"""Manages a connection to the KNX bus."""
from __future__ import annotations
from enum import Enum, auto
import os
from typing import Any
from xknx.telegram.address import IndividualAddress, IndividualAddressableType
from .const import DEFAULT_MCAST_GRP, DEFAULT_MCAST_PORT
from .gateway_scanner import GatewayScanFilter
class ConnectionType(Enum):
"""Enum class for different types of KNX/IP Connections."""
AUTOMATIC = auto()
ROUTING = auto()
ROUTING_SECURE = auto()
TUNNELING = auto()
TUNNELING_TCP = auto()
TUNNELING_TCP_SECURE = auto()
class ConnectionConfig:
"""
Connection configuration.
Handles:
* type of connection:
* AUTOMATIC for using GatewayScanner for searching and finding KNX/IP devices in the network.
* ROUTING use KNX/IP multicast routing.
* TUNNELING connect to a specific KNX/IP tunneling device via UDP.
* TUNNELING_TCP connect to a specific KNX/IP tunneling v2 device via TCP.
* individual address:
* AUTOMATIC use a specific tunnel endpoint from a given knxkeys file
* ROUTING the individual address used as source address for routing
* TCP TUNNELING request a specific tunnel endpoint
* SECURE TUNNELING use a specific tunnel endpoint from the knxkeys file
* local_ip: Local ip or interface name though which xknx should connect.
* gateway_ip: IP or hostname of KNX/IP tunneling device.
* gateway_port: Port of KNX/IP tunneling device.
* route_back: For UDP TUNNELING connection.
The KNXnet/IP Server shall use the IP address and port in the received IP package
as the target IP address or port number for the response to the KNXnet/IP Client.
* multicast_group: Multicast group for KNXnet/IP routing.
* multicast_port: Multicast port for KNXnet/IP routing.
* auto_reconnect: Auto reconnect to KNX/IP tunneling device if connection cannot be established.
* auto_reconnect_wait: Wait n seconds before trying to reconnect to KNX/IP tunneling device.
* scan_filter: For AUTOMATIC connection, limit scan with the given filter
* threaded: Run connection logic in separate thread to avoid concurrency issues in HA
* secure_config: KNX Secure config to use
"""
def __init__(
self,
*,
connection_type: ConnectionType = ConnectionType.AUTOMATIC,
individual_address: IndividualAddressableType | None = None,
local_ip: str | None = None,
local_port: int = 0,
gateway_ip: str | None = None,
gateway_port: int = DEFAULT_MCAST_PORT,
route_back: bool = False,
multicast_group: str = DEFAULT_MCAST_GRP,
multicast_port: int = DEFAULT_MCAST_PORT,
auto_reconnect: bool = True,
auto_reconnect_wait: int = 3,
scan_filter: GatewayScanFilter | None = None,
threaded: bool = False,
secure_config: SecureConfig | None = None,
):
"""Initialize ConnectionConfig class."""
self.connection_type = connection_type
self.individual_address = (
IndividualAddress(individual_address) if individual_address else None
)
self.local_ip = local_ip
self.local_port = local_port
self.gateway_ip = gateway_ip
self.gateway_port = gateway_port
self.route_back = route_back
self.multicast_group = multicast_group
self.multicast_port = multicast_port
self.auto_reconnect = auto_reconnect
self.auto_reconnect_wait = auto_reconnect_wait
self.scan_filter = scan_filter or GatewayScanFilter()
self.threaded = threaded
self.secure_config = secure_config
def __eq__(self, other: object) -> bool:
"""Equality for ConnectionConfig class (used in unit tests)."""
return self.__dict__ == other.__dict__
class SecureConfig:
"""
Secure configuration.
Handles:
* backbone_key: Key used for KNX Secure Routing in hex representation.
* latency_ms: Latency in milliseconds for KNX Secure Routing.
* user_id: The user id to use when initializing the secure tunnel.
* device_authentication_password: the authentication password to use when connecting to the tunnel.
* user_password: the user password for knx secure.
* knxkeys_file_path: Full path to the knxkeys file including the file name.
* knxkeys_password: Password to decrypt the knxkeys file.
"""
def __init__(
self,
*,
backbone_key: str | None = None,
latency_ms: int | None = None,
user_id: int | None = None,
device_authentication_password: str | None = None,
user_password: str | None = None,
knxkeys_file_path: str | os.PathLike[Any] | None = None,
knxkeys_password: str | None = None,
):
"""Initialize SecureConfig class."""
self.backbone_key = bytes.fromhex(backbone_key) if backbone_key else None
self.latency_ms = latency_ms
self.user_id = user_id
self.device_authentication_password = device_authentication_password
self.user_password = user_password
self.knxkeys_file_path = knxkeys_file_path
self.knxkeys_password = knxkeys_password
def __eq__(self, other: object) -> bool:
"""Equality for SecureConfig class (used in unit tests)."""
return self.__dict__ == other.__dict__
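# Editor-added, minimal usage sketch (illustrative only): the gateway address,
# knxkeys path and password below are made-up placeholders, not xknx defaults.
if __name__ == "__main__":
    example = ConnectionConfig(
        connection_type=ConnectionType.TUNNELING_TCP_SECURE,
        gateway_ip="192.168.1.2",
        secure_config=SecureConfig(
            knxkeys_file_path="/path/to/project.knxkeys",
            knxkeys_password="secret",
        ),
    )
    print(example.connection_type, example.gateway_ip)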
|
049a736e37bc2af45e009544a4fd0a9e3082fb65
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/onnxruntime/python/tools/kernel_explorer/kernels/gemm_fast_gelu_test.py
|
9b308c09811d1113ad71fa07db4f882ddb3fbee6
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 7,253
|
py
|
gemm_fast_gelu_test.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
import sys
from dataclasses import dataclass
from itertools import product
import kernel_explorer as ke
import numpy as np
import pytest
from utils import (
dtype_to_suffix,
fast_gelu,
get_gemm_basic_sizes,
get_gemm_bert_sizes,
get_gemm_bound,
matmul,
transab_to_suffix,
)
# TODO The test method needs update.
def _test_gemmfastgelu(my_func, dtype: str, m: int, n: int, k: int, transa=False, transb=False):
assert dtype in ["float16", "float32"]
a_shape = (k, m) if transa else (m, k)
b_shape = (n, k) if transb else (k, n)
np.random.seed(0)
a = (np.random.rand(*a_shape)).astype(dtype).astype("float64")
b = (np.random.rand(*b_shape)).astype(dtype).astype("float64")
bias = (np.random.rand(n)).astype(dtype)
temp_c = matmul(a, b, transa, transb)
bound = get_gemm_bound(dtype, a, b, temp_c, transa, transb, a_b_positive=True)
temp_c = temp_c.astype(dtype)
ref_c = fast_gelu(temp_c, bias)
a = a.astype(dtype)
b = b.astype(dtype)
my_c = np.zeros((m, n), dtype=dtype)
dev_a = ke.DeviceArray(a)
dev_b = ke.DeviceArray(b)
dev_bias = ke.DeviceArray(bias)
dev_c = ke.DeviceArray(my_c)
opa = ke.blas_op.T if transa else ke.blas_op.N
opb = ke.blas_op.T if transb else ke.blas_op.N
lda = a_shape[1]
ldb = b_shape[1]
alpha = 1.0
beta = 0.0
my_op = my_func(opa, opb, m, n, k, alpha, dev_a, lda, dev_b, ldb, dev_bias, beta, dev_c, n)
print(f"dtype={dtype} {transab_to_suffix((transa, transb))} m={m:<5} n={n:<5} k={k:<5} bound: {max(bound, 1e-2)}")
for impl in my_op.ListOps():
if not my_op.SelectOp(impl):
continue
my_op.Run()
dev_c.UpdateHostNumpyArray()
np.testing.assert_allclose(my_c, ref_c, rtol=max(bound, 1e-2))
dtypes = ["float16", "float32"]
all_transabs = list(product([True, False], repeat=2))
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("size", get_gemm_basic_sizes(full=False) + get_gemm_bert_sizes(full=False))
@pytest.mark.parametrize("transab", all_transabs)
def test_gemmfastgelu_unfused_bert_cases(dtype, size, transab):
_test_gemmfastgelu(getattr(ke, "GemmFastGeluUnfused_" + dtype_to_suffix(dtype)), dtype, *size, *transab)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("size", get_gemm_basic_sizes(full=False) + get_gemm_bert_sizes(full=False))
@pytest.mark.parametrize("transab", all_transabs)
def test_gemmfastgelu_tunable_bert_cases(dtype, size, transab):
wrapper_name = f"GemmFastGeluTunable_{dtype_to_suffix(dtype)}_{transab_to_suffix(transab)}"
_test_gemmfastgelu(getattr(ke, wrapper_name), dtype, *size, *transab)
@pytest.mark.skipif(not ke.is_composable_kernel_available(), reason="ck is not enabled")
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("size", get_gemm_basic_sizes(full=False) + get_gemm_bert_sizes(full=False))
@pytest.mark.parametrize("transab", all_transabs)
def test_gemmfastgelu_ck_bert_cases(dtype, size, transab):
wrapper_name = f"CKGemmFastGelu_{dtype_to_suffix(dtype)}_{transab_to_suffix(transab)}"
_test_gemmfastgelu(getattr(ke, wrapper_name), dtype, *size, *transab)
@pytest.mark.skipif(not ke.is_hipblaslt_available(), reason="hipblaslt is not available")
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("size", get_gemm_basic_sizes(full=False) + get_gemm_bert_sizes(full=False))
@pytest.mark.parametrize("transab", all_transabs)
def test_gemmfastgelu_hipblaslt_bert_cases(dtype, size, transab):
_test_gemmfastgelu(getattr(ke, "GemmFastGeluHipBlasLt_" + dtype_to_suffix(dtype)), dtype, *size, *transab)
@dataclass
class GemmFastGeluMetric(ke.ComputeMetric):
transa: bool
transb: bool
m: int
n: int
k: int
def report(self):
transab = transab_to_suffix((self.transa, self.transb))
common = f"{self.dtype} m={self.m:<4} n={self.n:<4} k={self.k:<4} {transab}, {self.name}"
if self.duration <= 0:
return "not supported " + common
return f"{self.duration:>6.2f} us {self.tflops:>5.2f} tflops " + common
def profile_gemmfastgelu_func(my_func, dtype: str, m: int, n: int, k: int, transa: bool, transb: bool):
a_shape = (k, m) if transa else (m, k)
b_shape = (n, k) if transb else (k, n)
np.random.seed(0)
a = (np.random.rand(*a_shape) * 2 - 1).astype(dtype)
b = (np.random.rand(*b_shape) * 2 - 1).astype(dtype)
my_c = np.zeros((m, n), dtype=dtype)
bias = np.random.rand(n).astype(dtype)
dev_a = ke.DeviceArray(a)
dev_b = ke.DeviceArray(b)
dev_bias = ke.DeviceArray(bias)
dev_c = ke.DeviceArray(my_c)
opa = ke.blas_op.T if transa else ke.blas_op.N
opb = ke.blas_op.T if transb else ke.blas_op.N
lda = a_shape[1]
ldb = b_shape[1]
alpha = 1.0
beta = 0.0
my_op = my_func(opa, opb, m, n, k, alpha, dev_a, lda, dev_b, ldb, dev_bias, beta, dev_c, n)
for impl in my_op.ListOps():
duration_ms = -1
if my_op.SelectOp(impl):
duration_ms = my_op.Profile()
# only counts gemm tflops because fastgelu is low order term (7 * n).
floating_point_operations = m * k * n * 2
ke.report(GemmFastGeluMetric(impl, dtype, duration_ms, floating_point_operations, transa, transb, m, n, k))
def profile_with_args(transa, transb, dtype, m, n, k, sort):
dtype_suffix = "_" + dtype_to_suffix(dtype)
transab_suffix = "_" + transab_to_suffix((transa, transb))
with ke.benchmark(sort):
profile_gemmfastgelu_func(getattr(ke, "GemmFastGeluUnfused" + dtype_suffix), dtype, m, n, k, transa, transb)
profile_gemmfastgelu_func(
getattr(ke, "CKGemmFastGelu" + dtype_suffix + transab_suffix), dtype, m, n, k, transa, transb
)
profile_gemmfastgelu_func(
getattr(ke, "GemmFastGeluTunable" + dtype_suffix + transab_suffix), dtype, m, n, k, transa, transb
)
if ke.is_hipblaslt_available():
profile_gemmfastgelu_func(
getattr(ke, "GemmFastGeluHipBlasLt" + dtype_suffix + transab_suffix), dtype, m, n, k, transa, transb
)
def profile():
for dtype in dtypes:
for m, n, k in get_gemm_bert_sizes(full=True):
profile_with_args(False, False, dtype, m, n, k, True)
print()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
group = parser.add_argument_group("profile with args")
group.add_argument("transa", choices="NT")
group.add_argument("transb", choices="NT")
group.add_argument("dtype", choices=dtypes)
group.add_argument("m", type=int)
group.add_argument("n", type=int)
group.add_argument("k", type=int)
group.add_argument("--sort", action="store_true")
if len(sys.argv) == 1:
profile()
else:
args = parser.parse_args()
profile_with_args(args.transa == "T", args.transb == "T", args.dtype, args.m, args.n, args.k, args.sort)
|
a64f4e225ebab58e0f0bb20b7f1a59ca93080b99
|
0b6c8fe083df77862a7d3b1c82e128a74c32272d
|
/embeddings/elmo.py
|
2928b66cb05ea3c98744da443f0911e136264b88
|
[
"MIT"
] |
permissive
|
vzhong/embeddings
|
fa34fc19f15149b8b0dd63e138afdf8c01c93b37
|
868b117bca4d9ac3e967bba5d895625db02cb2f3
|
refs/heads/master
| 2023-09-01T19:43:06.373578
| 2021-09-20T20:04:53
| 2021-09-20T20:04:53
| 83,266,708
| 226
| 29
|
MIT
| 2023-08-24T11:24:06
| 2017-02-27T04:05:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
elmo.py
|
import random
from collections import namedtuple
from os import path, makedirs
import zipfile
from tqdm import tqdm
from embeddings.embedding import Embedding
class ElmoEmbedding(Embedding):
"""
Reference: https://allennlp.org/elmo
"""
settings = {
'weights': 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5',
'options': 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json',
}
def __init__(self):
from allennlp.modules.elmo import _ElmoCharacterEncoder
if not path.isdir(self.path('elmo')):
makedirs(self.path('elmo'))
self.fweights = self.ensure_file(path.join('elmo', 'weights.hdf5'), url=self.settings['weights'])
self.foptions = self.ensure_file(path.join('elmo', 'options.json'), url=self.settings['options'])
self.embeddings = _ElmoCharacterEncoder(self.foptions, self.fweights)
def emb(self, word, default=None):
from allennlp.modules.elmo import batch_to_ids
idx = batch_to_ids([[word]])
emb = self.embeddings(idx)['token_embedding']
return emb[0, 1].tolist()
if __name__ == '__main__':
from time import time
emb = ElmoEmbedding()
for w in ['canada', 'vancouver', 'toronto']:
start = time()
print('embedding {}'.format(w))
print('size {}'.format(len(emb.emb(w))))
print('took {}s'.format(time() - start))
|
62427ec5a4ec665c4734b879f183d8e1f93856b0
|
c531778b6b568e5924fcf438dce274067b6e1d31
|
/packages/h11/tests/test_helpers.py
|
1477947afa6f10cedf4701a80f31e5c92bc41a6a
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
CastagnaIT/plugin.video.netflix
|
a5180fbbaea244a490f750a2dd417b4e7303321a
|
ece10d24449faaccd7d65a4093c6b5679ee0b383
|
refs/heads/master
| 2023-07-01T23:32:20.442923
| 2023-06-27T06:42:18
| 2023-06-27T06:42:18
| 164,314,803
| 2,019
| 456
|
MIT
| 2023-09-13T13:34:06
| 2019-01-06T14:27:56
|
Python
|
UTF-8
|
Python
| false
| false
| 573
|
py
|
test_helpers.py
|
from .helpers import *
def test_normalize_data_events():
assert normalize_data_events(
[
Data(data=bytearray(b"1")),
Data(data=b"2"),
Response(status_code=200, headers=[]),
Data(data=b"3"),
Data(data=b"4"),
EndOfMessage(),
Data(data=b"5"),
Data(data=b"6"),
Data(data=b"7"),
]
) == [
Data(data=b"12"),
Response(status_code=200, headers=[]),
Data(data=b"34"),
EndOfMessage(),
Data(data=b"567"),
]
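# A rough sketch of what the normalize_data_events helper presumably does,
# inferred only from the assertion above: adjacent Data events are coalesced
# into a single Data event, while other events pass through unchanged. The
# real helper in .helpers may be implemented differently.
def normalize_data_events_sketch(events):
    out = []
    for event in events:
        if isinstance(event, Data) and out and isinstance(out[-1], Data):
            # merge this chunk into the previous Data event
            out[-1] = Data(data=bytes(out[-1].data) + bytes(event.data))
        else:
            out.append(event)
    return out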
|
431a5b2e14e309ef57c073bc9c2a0111833d0b04
|
83efec99c7c2fc08afb845fcab8328a5e1a2e687
|
/gis4wrf/plugin/ui/widget_met.py
|
4b376c2d36efa5a0382ccaf38630ed9ee47056c3
|
[
"MIT"
] |
permissive
|
GIS4WRF/gis4wrf
|
a377fa79c207e110852bc4bf68acbc25be1a8fbc
|
9b311066d97595317fac9348a2acb35b04500651
|
refs/heads/master
| 2023-02-24T19:54:32.579246
| 2023-02-13T15:34:46
| 2023-02-13T15:34:46
| 137,216,497
| 152
| 34
|
MIT
| 2023-02-13T15:34:48
| 2018-06-13T13:03:39
|
Python
|
UTF-8
|
Python
| false
| false
| 11,432
|
py
|
widget_met.py
|
# GIS4WRF (https://doi.org/10.5281/zenodo.1288569)
# Copyright (c) 2018 D. Meyer and M. Riechert. Licensed under MIT.
from PyQt5.QtCore import Qt, QDate, QTime, QDateTime
from PyQt5.QtGui import QDoubleValidator
from PyQt5.QtWidgets import (
QWidget, QPushButton, QVBoxLayout, QGridLayout, QGroupBox, QLabel, QHBoxLayout,
QComboBox, QRadioButton, QTreeWidget, QTreeWidgetItem, QDateTimeEdit, QTreeWidgetItemIterator,
QListWidget, QListWidgetItem, QProgressBar, QMessageBox
)
from gis4wrf.core import (
met_datasets, get_met_products, is_met_dataset_downloaded, get_met_dataset_path, download_met_dataset,
CRS, UserError, logger)
from gis4wrf.plugin.options import get_options
from gis4wrf.plugin.geo import rect_to_bbox
from gis4wrf.plugin.broadcast import Broadcast
from gis4wrf.plugin.ui.helpers import add_grid_lineedit, MessageBar, reraise
from gis4wrf.plugin.ui.thread import TaskThread
DECIMALS = 50
LON_VALIDATOR = QDoubleValidator(-180.0, 180.0, DECIMALS)
LAT_VALIDATOR = QDoubleValidator(-90.0, 90.0, DECIMALS)
# higher resolution than default (100)
PROGRESS_BAR_MAX = 1000
# TODO display bbox as vector layer if not global extent
class MetToolsDownloadManager(QWidget):
def __init__(self, iface) -> None:
super().__init__()
self.iface = iface
self.options = get_options()
self.msg_bar = MessageBar(iface)
vbox = QVBoxLayout()
self.setLayout(vbox)
hbox = QHBoxLayout()
vbox.addLayout(hbox)
hbox.addWidget(QLabel('Dataset: '))
self.cbox_dataset = QComboBox()
self.cbox_dataset.addItem('-')
for index, (dataset_name, dataset_label) in enumerate(met_datasets.items()):
self.cbox_dataset.addItem(dataset_name, dataset_name)
self.cbox_dataset.setItemData(index + 1, dataset_label, Qt.ToolTipRole)
self.cbox_dataset.currentIndexChanged.connect(self.on_dataset_changed)
hbox.addWidget(self.cbox_dataset)
hbox_product_name = QHBoxLayout()
vbox.addLayout(hbox_product_name)
hbox_product_name.addWidget(QLabel('Product: '))
self.cbox_product = QComboBox()
self.cbox_product.currentIndexChanged.connect(self.on_product_changed)
hbox_product_name.addWidget(self.cbox_product)
hbox_start_datetime = QHBoxLayout()
vbox.addLayout(hbox_start_datetime)
self.dedit_start_date = QDateTimeEdit()
self.dedit_start_date.setCalendarPopup(True)
hbox_start_datetime.addWidget(QLabel('Start: '))
hbox_start_datetime.addWidget(self.dedit_start_date)
hbox_end_datetime = QHBoxLayout()
vbox.addLayout(hbox_end_datetime)
self.dedit_end_date = QDateTimeEdit()
self.dedit_end_date.setCalendarPopup(True)
hbox_end_datetime.addWidget(QLabel('End: '))
hbox_end_datetime.addWidget(self.dedit_end_date)
gbox_extent = QGroupBox('Extent')
vbox.addWidget(gbox_extent)
vbox_extent = QVBoxLayout()
gbox_extent.setLayout(vbox_extent)
hbox_extent = QHBoxLayout()
vbox_extent.addLayout(hbox_extent)
self.radio_global = QRadioButton('Global')
self.radio_global.toggled.connect(self.on_extent_radio_button_clicked)
hbox_extent.addWidget(self.radio_global)
self.radio_subset = QRadioButton('Subset')
self.radio_subset.toggled.connect(self.on_extent_radio_button_clicked)
hbox_extent.addWidget(self.radio_subset)
self.widget_extent = QWidget()
vbox_extent.addWidget(self.widget_extent)
grid_extent = QGridLayout()
self.widget_extent.setLayout(grid_extent)
self.widget_extent.hide()
self.top = add_grid_lineedit(grid_extent, 0, 'North Latitude',
LAT_VALIDATOR, '°', required=True)
self.right = add_grid_lineedit(grid_extent, 1, 'East Longitude',
LON_VALIDATOR, '°', required=True)
self.left = add_grid_lineedit(grid_extent, 2, 'West Longitude',
LON_VALIDATOR, '°', required=True)
self.bottom = add_grid_lineedit(grid_extent, 3, 'South Latitude',
LAT_VALIDATOR, '°', required=True)
self.extent_from_active_layer = QPushButton('Set from Active Layer')
grid_extent.addWidget(self.extent_from_active_layer, 4, 1)
self.extent_from_active_layer.clicked.connect(self.on_extent_from_active_layer_button_clicked)
self.radio_global.setChecked(True)
self.tree = QListWidget()
vbox_tree = QVBoxLayout()
vbox.addLayout(vbox_tree)
vbox_tree.addWidget(self.tree)
self.btn_download = QPushButton('Download')
self.btn_download.clicked.connect(self.on_download_button_clicked)
vbox.addWidget(self.btn_download)
self.progress_bar = QProgressBar()
self.progress_bar.setRange(0, PROGRESS_BAR_MAX)
self.progress_bar.setTextVisible(False)
self.progress_bar.hide()
vbox.addWidget(self.progress_bar)
def on_dataset_changed(self, index: int):
self.cbox_product.clear()
dataset_name = self.cbox_dataset.currentData()
if dataset_name is None:
return
auth = (self.options.rda_username, self.options.rda_password)
self.products = get_met_products(dataset_name, auth)
for product in self.products.keys():
self.cbox_product.addItem(product, product)
def on_product_changed(self, index: int):
if index == -1:
return
self.tree.clear()
product_name = self.cbox_product.currentData()
current_avail_vars = self.products[product_name]
dates = []
for name in current_avail_vars.keys():
item = QListWidgetItem(current_avail_vars[name]['label'])
item.setData(Qt.UserRole, name)
item.setCheckState(Qt.Checked)
self.tree.addItem(item)
dates.append(current_avail_vars[name]['start_date'])
dates.append(current_avail_vars[name]['end_date'])
date_min = min(dates)
date_max = max(dates)
for dt_input in [self.dedit_start_date, self.dedit_end_date]:
dt_input.setDateTimeRange(
QDateTime(QDate(date_min.year, date_min.month, date_min.day), QTime(date_min.hour, date_min.minute)),
QDateTime(QDate(date_max.year, date_max.month, date_max.day), QTime(date_max.hour, date_max.minute)))
min_dt = self.dedit_start_date.minimumDateTime()
max_dt = self.dedit_start_date.maximumDateTime()
self.dedit_start_date.setDateTime(min_dt)
self.dedit_end_date.setDateTime(max_dt)
def on_download_button_clicked(self):
param_names = []
for index in range(self.tree.count()):
item = self.tree.item(index)
if item.checkState() == Qt.Checked:
param_name = item.data(Qt.UserRole)
param_names.append(param_name)
dataset_name = self.cbox_dataset.currentData()
product_name = self.cbox_product.currentData()
start_date = self.dedit_start_date.dateTime().toPyDateTime()
end_date = self.dedit_end_date.dateTime().toPyDateTime()
if dataset_name is None or product_name is None:
raise UserError('Dataset/Product not selected')
args = [self.options.met_dir, dataset_name, product_name, start_date, end_date]
if is_met_dataset_downloaded(*args):
reply = QMessageBox.question(self.iface.mainWindow(), 'Existing dataset',
('You already downloaded data with the selected dataset/product/date/time combination. '
'If you continue, this data will be removed.\n'
'Location: {}'.format(get_met_dataset_path(*args))),
QMessageBox.Ok, QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
return
lat_north = self.top.value()
lat_south = self.bottom.value()
lon_west = self.left.value()
lon_east = self.right.value()
auth = (self.options.rda_username, self.options.rda_password)
thread = TaskThread(
lambda: download_met_dataset(self.options.met_dir, auth,
dataset_name, product_name, param_names,
start_date, end_date,
lat_south, lat_north, lon_west, lon_east),
yields_progress=True)
thread.started.connect(self.on_started_download)
thread.progress.connect(self.on_progress_download)
thread.finished.connect(self.on_finished_download)
thread.succeeded.connect(self.on_successful_download)
thread.failed.connect(reraise)
thread.start()
def on_started_download(self) -> None:
self.btn_download.hide()
self.progress_bar.show()
def on_progress_download(self, progress: float, status: str) -> None:
bar_value = int(progress * PROGRESS_BAR_MAX)
self.progress_bar.setValue(bar_value)
self.progress_bar.repaint() # otherwise just updates in 1% steps
if status == 'submitted':
self.msg_bar.info('Met dataset download request submitted successfully, waiting until available for download...')
elif status == 'ready':
self.msg_bar.info('Met dataset download request is now ready, downloading...')
logger.debug(f'Met data download: {progress*100:.1f}% - {status}')
def on_finished_download(self) -> None:
self.btn_download.show()
self.progress_bar.hide()
def on_successful_download(self) -> None:
self.msg_bar.success('Meteorological dataset downloaded successfully.')
Broadcast.met_datasets_updated.emit()
def on_extent_radio_button_clicked(self):
if self.radio_global.isChecked():
self.top.set_value(90)
self.bottom.set_value(-90)
self.left.set_value(-180)
self.right.set_value(180)
self.top.setDisabled(True)
self.bottom.setDisabled(True)
self.left.setDisabled(True)
self.right.setDisabled(True)
self.widget_extent.hide()
elif self.radio_subset.isChecked():
self.widget_extent.show()
self.top.setDisabled(False)
self.bottom.setDisabled(False)
self.left.setDisabled(False)
self.right.setDisabled(False)
def on_extent_from_active_layer_button_clicked(self):
layer = self.iface.activeLayer() # type: Optional[QgsMapLayer]
if layer is None:
return
layer_crs = CRS(layer.crs().toProj4())
target_crs = CRS('+proj=latlong +datum=WGS84')
extent = layer.extent() # type: QgsRectangle
bbox = rect_to_bbox(extent)
bbox_geo = layer_crs.transform_bbox(bbox, target_crs.srs)
padding = 5 # degrees
        lat_south = max(bbox_geo.miny - padding, -90)
        lat_north = min(bbox_geo.maxy + padding, 90)
        lon_west = max(bbox_geo.minx - padding, -180)
        lon_east = min(bbox_geo.maxx + padding, 180)
self.bottom.set_value(lat_south)
self.top.set_value(lat_north)
self.left.set_value(lon_west)
self.right.set_value(lon_east)
|
15b7bd2f0fca9712abe244727aaf0d05194c52e6
|
f2fc344b95a3f0b98c34ba5fe1a8c3e08b176c2b
|
/run.py
|
d78448600473d74939d4a820e1b9910f46cc8034
|
[
"MIT"
] |
permissive
|
microsoft/Bringing-Old-Photos-Back-to-Life
|
bc6ca6b84ac0f20e50fb9f7ebc45297d3ec10b1f
|
33875eccf4ebcd3665cf38cc56f3a0ce563d3a9c
|
refs/heads/master
| 2023-08-20T06:50:51.131642
| 2022-07-23T06:52:29
| 2022-07-23T06:52:29
| 274,594,200
| 14,159
| 1,863
|
MIT
| 2023-06-07T04:43:29
| 2020-06-24T06:37:58
|
Python
|
UTF-8
|
Python
| false
| false
| 6,775
|
py
|
run.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import argparse
import shutil
import sys
from subprocess import call
def run_cmd(command):
try:
call(command, shell=True)
except KeyboardInterrupt:
print("Process interrupted")
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_folder", type=str, default="./test_images/old", help="Test images")
parser.add_argument(
"--output_folder",
type=str,
default="./output",
help="Restored images, please use the absolute path",
)
parser.add_argument("--GPU", type=str, default="6,7", help="0,1,2")
parser.add_argument(
"--checkpoint_name", type=str, default="Setting_9_epoch_100", help="choose which checkpoint"
)
parser.add_argument("--with_scratch", action="store_true")
parser.add_argument("--HR", action='store_true')
opts = parser.parse_args()
gpu1 = opts.GPU
# resolve relative paths before changing directory
opts.input_folder = os.path.abspath(opts.input_folder)
opts.output_folder = os.path.abspath(opts.output_folder)
if not os.path.exists(opts.output_folder):
os.makedirs(opts.output_folder)
main_environment = os.getcwd()
## Stage 1: Overall Quality Improve
print("Running Stage 1: Overall restoration")
os.chdir("./Global")
stage_1_input_dir = opts.input_folder
stage_1_output_dir = os.path.join(opts.output_folder, "stage_1_restore_output")
if not os.path.exists(stage_1_output_dir):
os.makedirs(stage_1_output_dir)
if not opts.with_scratch:
stage_1_command = (
"python test.py --test_mode Full --Quality_restore --test_input "
+ stage_1_input_dir
+ " --outputs_dir "
+ stage_1_output_dir
+ " --gpu_ids "
+ gpu1
)
run_cmd(stage_1_command)
else:
mask_dir = os.path.join(stage_1_output_dir, "masks")
new_input = os.path.join(mask_dir, "input")
new_mask = os.path.join(mask_dir, "mask")
stage_1_command_1 = (
"python detection.py --test_path "
+ stage_1_input_dir
+ " --output_dir "
+ mask_dir
+ " --input_size full_size"
+ " --GPU "
+ gpu1
)
if opts.HR:
HR_suffix=" --HR"
else:
HR_suffix=""
stage_1_command_2 = (
"python test.py --Scratch_and_Quality_restore --test_input "
+ new_input
+ " --test_mask "
+ new_mask
+ " --outputs_dir "
+ stage_1_output_dir
+ " --gpu_ids "
+ gpu1 + HR_suffix
)
run_cmd(stage_1_command_1)
run_cmd(stage_1_command_2)
## Solve the case when there is no face in the old photo
stage_1_results = os.path.join(stage_1_output_dir, "restored_image")
stage_4_output_dir = os.path.join(opts.output_folder, "final_output")
if not os.path.exists(stage_4_output_dir):
os.makedirs(stage_4_output_dir)
for x in os.listdir(stage_1_results):
img_dir = os.path.join(stage_1_results, x)
shutil.copy(img_dir, stage_4_output_dir)
print("Finish Stage 1 ...")
print("\n")
## Stage 2: Face Detection
print("Running Stage 2: Face Detection")
os.chdir(".././Face_Detection")
stage_2_input_dir = os.path.join(stage_1_output_dir, "restored_image")
stage_2_output_dir = os.path.join(opts.output_folder, "stage_2_detection_output")
if not os.path.exists(stage_2_output_dir):
os.makedirs(stage_2_output_dir)
if opts.HR:
stage_2_command = (
"python detect_all_dlib_HR.py --url " + stage_2_input_dir + " --save_url " + stage_2_output_dir
)
else:
stage_2_command = (
"python detect_all_dlib.py --url " + stage_2_input_dir + " --save_url " + stage_2_output_dir
)
run_cmd(stage_2_command)
print("Finish Stage 2 ...")
print("\n")
## Stage 3: Face Restore
print("Running Stage 3: Face Enhancement")
os.chdir(".././Face_Enhancement")
stage_3_input_mask = "./"
stage_3_input_face = stage_2_output_dir
stage_3_output_dir = os.path.join(opts.output_folder, "stage_3_face_output")
if not os.path.exists(stage_3_output_dir):
os.makedirs(stage_3_output_dir)
if opts.HR:
opts.checkpoint_name='FaceSR_512'
stage_3_command = (
"python test_face.py --old_face_folder "
+ stage_3_input_face
+ " --old_face_label_folder "
+ stage_3_input_mask
+ " --tensorboard_log --name "
+ opts.checkpoint_name
+ " --gpu_ids "
+ gpu1
+ " --load_size 512 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 1 --results_dir "
+ stage_3_output_dir
+ " --no_parsing_map"
)
else:
stage_3_command = (
"python test_face.py --old_face_folder "
+ stage_3_input_face
+ " --old_face_label_folder "
+ stage_3_input_mask
+ " --tensorboard_log --name "
+ opts.checkpoint_name
+ " --gpu_ids "
+ gpu1
+ " --load_size 256 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 4 --results_dir "
+ stage_3_output_dir
+ " --no_parsing_map"
)
run_cmd(stage_3_command)
print("Finish Stage 3 ...")
print("\n")
## Stage 4: Warp back
print("Running Stage 4: Blending")
os.chdir(".././Face_Detection")
stage_4_input_image_dir = os.path.join(stage_1_output_dir, "restored_image")
stage_4_input_face_dir = os.path.join(stage_3_output_dir, "each_img")
stage_4_output_dir = os.path.join(opts.output_folder, "final_output")
if not os.path.exists(stage_4_output_dir):
os.makedirs(stage_4_output_dir)
if opts.HR:
stage_4_command = (
"python align_warp_back_multiple_dlib_HR.py --origin_url "
+ stage_4_input_image_dir
+ " --replace_url "
+ stage_4_input_face_dir
+ " --save_url "
+ stage_4_output_dir
)
else:
stage_4_command = (
"python align_warp_back_multiple_dlib.py --origin_url "
+ stage_4_input_image_dir
+ " --replace_url "
+ stage_4_input_face_dir
+ " --save_url "
+ stage_4_output_dir
)
run_cmd(stage_4_command)
print("Finish Stage 4 ...")
print("\n")
print("All the processing is done. Please check the results.")
|
15c75a3894f098169e028435ab76c88c3645ba92
|
75bc9ec750d31cf9bdf815dc92b489c958fcf396
|
/script/panel_data/alpha98InPython.py
|
6e2d8ad7e78d7280f372a8cce56f397b53eb33b9
|
[
"Apache-2.0"
] |
permissive
|
dolphindb/Tutorials_CN
|
624da0a6f18fc786095bb9076c31c911413c0b10
|
d561e38f850319e3b134c50cf7a37e61aedc19ea
|
refs/heads/master
| 2023-09-01T00:44:51.367999
| 2023-08-30T09:35:09
| 2023-08-30T09:35:09
| 139,725,144
| 286
| 74
|
Apache-2.0
| 2019-04-11T07:57:29
| 2018-07-04T13:21:56
|
C++
|
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
alpha98InPython.py
|
import dolphindb as ddb
import numpy as np
import pandas as pd
import time
s = ddb.session()
s.connect("ip", {port}, "admin", "123456")
vwap=s.run("vwap1")
open=s.run("open1")
vol=s.run("vol1")
vwap.set_index("tradetime", inplace=True)
open.set_index("tradetime", inplace=True)
vol.set_index("tradetime", inplace=True)
def myrank(x):
return ((x.rank(axis=1,method='min'))-1)/x.shape[1]
def imin(x):
return np.where(x==min(x))[0][0]
def rank(x):
s = pd.Series(x)
return (s.rank(ascending=True, method="min")[len(s)-1])-1
def alpha98(vwap, open, vol):
    return myrank(
        vwap.rolling(5).corr(vol.rolling(5).mean().rolling(26).sum())
            .rolling(7).apply(lambda x: np.sum(np.arange(1, 8)*x)/np.sum(np.arange(1, 8)))
    ) - myrank(
        (9 - myrank(open).rolling(21).corr(myrank(vol.rolling(15).mean())).rolling(9).apply(imin))
            .rolling(7).apply(rank)
            .rolling(8).apply(lambda x: np.sum(np.arange(1, 9)*x)/np.sum(np.arange(1, 9)))
    )
start_time = time.time()
re=alpha98(vwap, open, vol)
print("--- %s seconds ---" % (time.time() - start_time))
|
b25ffb2c0dfcee92fd964e1d78d1ebdb4197ba59
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/Matcher.py
|
503ae5008fb577bd85be5daed4385c9093c7f860
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
Matcher.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Matcher(object):
def __init__(self):
self._identity_card = None
self._mobile_no = None
self._open_id = None
self._user_id = None
@property
def identity_card(self):
return self._identity_card
@identity_card.setter
def identity_card(self, value):
self._identity_card = value
@property
def mobile_no(self):
return self._mobile_no
@mobile_no.setter
def mobile_no(self, value):
self._mobile_no = value
@property
def open_id(self):
return self._open_id
@open_id.setter
def open_id(self, value):
self._open_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.identity_card:
if hasattr(self.identity_card, 'to_alipay_dict'):
params['identity_card'] = self.identity_card.to_alipay_dict()
else:
params['identity_card'] = self.identity_card
if self.mobile_no:
if hasattr(self.mobile_no, 'to_alipay_dict'):
params['mobile_no'] = self.mobile_no.to_alipay_dict()
else:
params['mobile_no'] = self.mobile_no
if self.open_id:
if hasattr(self.open_id, 'to_alipay_dict'):
params['open_id'] = self.open_id.to_alipay_dict()
else:
params['open_id'] = self.open_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Matcher()
if 'identity_card' in d:
o.identity_card = d['identity_card']
if 'mobile_no' in d:
o.mobile_no = d['mobile_no']
if 'open_id' in d:
o.open_id = d['open_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
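# Hypothetical round-trip through the helpers above; the field values are
# made-up placeholders, not real Alipay identifiers.
if __name__ == "__main__":
    m = Matcher()
    m.mobile_no = "13800000000"
    m.user_id = "2088000000000000"
    d = m.to_alipay_dict()            # {'mobile_no': '138...', 'user_id': '2088...'}
    m2 = Matcher.from_alipay_dict(d)
    assert m2.mobile_no == m.mobile_no and m2.user_id == m.user_id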
|
6edcc8a8bddb0aa96488597d8fe500403108d775
|
32712c478ff9dff44de085cb50a1302bfc2eba67
|
/users/apps.py
|
f50e9e449ca4a60836599d9628440e00a92cba8d
|
[
"MIT"
] |
permissive
|
vas3k/vas3k.club
|
158af17c329fe693178ca1bce36466922604df3b
|
b3ff2fd95ef1d6c593c57d3bcd501240f2705fbb
|
refs/heads/master
| 2023-09-03T07:10:10.859004
| 2023-09-01T09:08:32
| 2023-09-01T09:08:32
| 254,190,180
| 697
| 326
|
MIT
| 2023-09-04T09:02:12
| 2020-04-08T20:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 128
|
py
|
apps.py
|
import logging
from django.apps import AppConfig
log = logging.getLogger()
class UsersConfig(AppConfig):
name = "users"
|
ee8eb6336abea5f88037098c4c84cf0fe301774d
|
f509ab9825c542e09b0c6591d86ef1f9feb540a6
|
/pkgs/filetransferutils-pkg/src/genie/libs/filetransferutils/plugins/iosxr/http/fileutils.py
|
57e1dd37fe4ab148e60acc7e0b2ed79afa3dfd3f
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genielibs
|
97f597117193aaa18028defeb69078ebb241173a
|
e42e51475cddcb10f5c7814d0fe892ac865742ba
|
refs/heads/master
| 2023-08-11T16:39:41.959947
| 2023-07-27T17:58:42
| 2023-07-27T17:58:42
| 130,717,047
| 109
| 60
|
Apache-2.0
| 2023-08-29T22:32:08
| 2018-04-23T15:21:56
|
Python
|
UTF-8
|
Python
| false
| false
| 331
|
py
|
fileutils.py
|
""" File utils base class for HTTP on IOSXR devices. """
from ..fileutils import FileUtils as FileUtilsXRBase
class FileUtils(FileUtilsXRBase):
COPY_CONFIG_TEMPLATE = ['http client source-interface ipv4 {interface}']
COPY_CONFIG_VRF_TEMPLATE = ['http client vrf {vrf}', 'http client source-interface ipv4 {interface}']
|
f01677eb8e4d08bd3b9a409e0ccffc454c1807a4
|
830dcda30c0ef820474f4acb3364f0cfdee5ae01
|
/scalabel/label/__init__.py
|
2f69d9558051b646c77739d270e9f69b653cf50e
|
[
"Apache-2.0"
] |
permissive
|
scalabel/scalabel
|
e9418168466312db452278415e202eb9922142b8
|
96ad0fffe06a3c9bdd83453c8ec9b70cbbbde641
|
refs/heads/master
| 2023-08-20T13:38:13.950520
| 2023-06-30T18:49:19
| 2023-06-30T18:49:19
| 222,340,751
| 530
| 130
|
Apache-2.0
| 2023-06-30T18:49:20
| 2019-11-18T01:43:08
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 98
|
py
|
__init__.py
|
"""Label definition and conversion."""
from . import coco_typing, from_coco, io, to_coco, typing
|
032a34c0a4aa4f54423eb3930a04dbc8c2e69755
|
849364a9b65ac32feab67dd3bb86119a0102f048
|
/tests/test36/t2.py
|
1fcf846b2b2b2bdfb02a3dd6be3a6bb78129c599
|
[] |
no_license
|
zokis/Python--Faster-Way
|
585b46e50cc70c6b4f3b026d3b82ba2705f6fc6b
|
8f11e9246198c6bc3c0c58668674d75188c966ae
|
refs/heads/gh-pages
| 2022-07-28T18:50:54.342599
| 2015-07-02T19:43:18
| 2015-07-02T19:43:18
| 12,438,963
| 200
| 31
| null | 2018-03-25T16:12:12
| 2013-08-28T16:35:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 82
|
py
|
t2.py
|
def a():
r = []
for x in range(5):
r.append([0] * 5)
return r
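# For comparison, a list-comprehension variant that builds the same 5x5 grid
# of zeros; the surrounding repository appears to benchmark such alternatives
# against each other.
def a_comprehension():
    return [[0] * 5 for _ in range(5)]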
|
64f869d8da0790eb14384b7a14ce1b8fa1467ed1
|
ee87c715e5d937b0380ddb87d56e9ebc4877a02b
|
/asv_benchmarks/benchmarks/model_selection.py
|
335ffe498adaa6187b128398e2c303800db71fa8
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-learn/scikit-learn
|
27a2196f3173e0f32f7a5c5d652b70a6c57c7644
|
061f8777b48e5491b0c57bb8e0bc7067c103079d
|
refs/heads/main
| 2023-08-18T15:32:59.764468
| 2023-08-18T14:39:08
| 2023-08-18T14:39:08
| 843,222
| 58,456
| 29,777
|
BSD-3-Clause
| 2023-09-14T19:08:34
| 2010-08-17T09:43:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,371
|
py
|
model_selection.py
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score
from .common import Benchmark, Estimator, Predictor
from .datasets import _synth_classification_dataset
from .utils import make_gen_classif_scorers
class CrossValidationBenchmark(Benchmark):
"""
Benchmarks for Cross Validation.
"""
timeout = 20000
param_names = ["n_jobs"]
params = (Benchmark.n_jobs_vals,)
def setup(self, *params):
(n_jobs,) = params
data = _synth_classification_dataset(n_samples=50000, n_features=100)
self.X, self.X_val, self.y, self.y_val = data
self.clf = RandomForestClassifier(n_estimators=50, max_depth=10, random_state=0)
cv = 16 if Benchmark.data_size == "large" else 4
self.cv_params = {"n_jobs": n_jobs, "cv": cv}
def time_crossval(self, *args):
cross_val_score(self.clf, self.X, self.y, **self.cv_params)
def peakmem_crossval(self, *args):
cross_val_score(self.clf, self.X, self.y, **self.cv_params)
def track_crossval(self, *args):
return float(cross_val_score(self.clf, self.X, self.y, **self.cv_params).mean())
class GridSearchBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for GridSearch.
"""
timeout = 20000
param_names = ["n_jobs"]
params = (Benchmark.n_jobs_vals,)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
data = _synth_classification_dataset(n_samples=10000, n_features=100)
return data
def make_estimator(self, params):
(n_jobs,) = params
clf = RandomForestClassifier(random_state=0)
if Benchmark.data_size == "large":
n_estimators_list = [10, 25, 50, 100, 500]
max_depth_list = [5, 10, None]
max_features_list = [0.1, 0.4, 0.8, 1.0]
else:
n_estimators_list = [10, 25, 50]
max_depth_list = [5, 10]
max_features_list = [0.1, 0.4, 0.8]
param_grid = {
"n_estimators": n_estimators_list,
"max_depth": max_depth_list,
"max_features": max_features_list,
}
estimator = GridSearchCV(clf, param_grid, n_jobs=n_jobs, cv=4)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
|
06fc8a2b22d25b3b88da73fac87a68c45bcb040e
|
3bc139860403ebd05e278c95fca26e24d5189271
|
/chia/rpc/crawler_rpc_api.py
|
b4122f0f1957afe0085ea457267b0f130a2221a5
|
[
"Apache-2.0"
] |
permissive
|
Chia-Network/chia-blockchain
|
a09183b7240b159419b45f8373a41a1062f77ef3
|
d966f3f9e63aed52dbd73544164202a9f11ce3d2
|
refs/heads/main
| 2023-08-31T09:37:13.741283
| 2023-08-30T18:27:22
| 2023-08-30T18:27:22
| 197,153,676
| 12,936
| 2,474
|
Apache-2.0
| 2023-09-14T19:08:51
| 2019-07-16T08:32:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,640
|
py
|
crawler_rpc_api.py
|
from __future__ import annotations
import ipaddress
from typing import Any, Dict, List, Optional
from chia.rpc.rpc_server import Endpoint, EndpointResult
from chia.seeder.crawler import Crawler
from chia.util.ws_message import WsRpcMessage, create_payload_dict
class CrawlerRpcApi:
def __init__(self, crawler: Crawler):
self.service = crawler
self.service_name = "chia_crawler"
def get_routes(self) -> Dict[str, Endpoint]:
return {
"/get_peer_counts": self.get_peer_counts,
"/get_ips_after_timestamp": self.get_ips_after_timestamp,
}
async def _state_changed(self, change: str, change_data: Optional[Dict[str, Any]] = None) -> List[WsRpcMessage]:
payloads = []
if change_data is None:
change_data = await self.get_peer_counts({})
if change in ("crawl_batch_completed", "loaded_initial_peers"):
payloads.append(create_payload_dict(change, change_data, self.service_name, "metrics"))
return payloads
async def get_peer_counts(self, _request: Dict[str, Any]) -> EndpointResult:
ipv6_addresses_count = 0
for host in self.service.best_timestamp_per_peer.keys():
try:
ipaddress.IPv6Address(host)
ipv6_addresses_count += 1
except ipaddress.AddressValueError:
continue
reliable_peers = 0
if self.service.crawl_store is not None:
reliable_peers = self.service.crawl_store.get_reliable_peers()
data = {
"peer_counts": {
"total_last_5_days": len(self.service.best_timestamp_per_peer),
"reliable_nodes": reliable_peers,
"ipv4_last_5_days": len(self.service.best_timestamp_per_peer) - ipv6_addresses_count,
"ipv6_last_5_days": ipv6_addresses_count,
"versions": self.service.versions,
}
}
return data
async def get_ips_after_timestamp(self, _request: Dict[str, Any]) -> EndpointResult:
after = _request.get("after", None)
if after is None:
raise ValueError("`after` is required and must be a unix timestamp")
offset = _request.get("offset", 0)
limit = _request.get("limit", 10000)
matched_ips: List[str] = []
for ip, timestamp in self.service.best_timestamp_per_peer.items():
if timestamp > after:
matched_ips.append(ip)
matched_ips.sort()
return {
"ips": matched_ips[offset : (offset + limit)],
"total": len(matched_ips),
}
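# Rough usage sketch with a stand-in service object instead of a real Crawler;
# the attribute names mirror what the endpoints above read.
if __name__ == "__main__":
    import asyncio
    from types import SimpleNamespace
    fake_crawler = SimpleNamespace(
        best_timestamp_per_peer={"2001:db8::1": 1700000100, "192.0.2.10": 1700000200},
        crawl_store=None,
        versions={},
    )
    api = CrawlerRpcApi(fake_crawler)  # type: ignore[arg-type]
    print(asyncio.run(api.get_peer_counts({})))
    print(asyncio.run(api.get_ips_after_timestamp({"after": 1700000000})))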
|
91d076f37b688aa278ceb53acadb55adb17f8a87
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/tests/system/providers/google/cloud/compute/example_compute_igm.py
|
45449cf12574f5c334de6592bb88e7f7a9ec0011
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 8,930
|
py
|
example_compute_igm.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that:
* creates a copy of existing Instance Template
* updates existing template in Instance Group Manager
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.models.baseoperator import chain
from airflow.providers.google.cloud.operators.compute import (
ComputeEngineCopyInstanceTemplateOperator,
ComputeEngineDeleteInstanceGroupManagerOperator,
ComputeEngineDeleteInstanceTemplateOperator,
ComputeEngineInsertInstanceGroupManagerOperator,
ComputeEngineInsertInstanceTemplateOperator,
ComputeEngineInstanceGroupUpdateManagerTemplateOperator,
)
from airflow.utils.trigger_rule import TriggerRule
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
PROJECT_ID = os.environ.get("SYSTEM_TESTS_GCP_PROJECT")
LOCATION = "europe-west1-b"
REGION = "europe-west1"
SHORT_MACHINE_TYPE_NAME = "n1-standard-1"
DAG_ID = "cloud_compute_igm"
# [START howto_operator_compute_template_copy_args]
TEMPLATE_NAME = "instance-template-igm-test"
NEW_TEMPLATE_NAME = "instance-template-test-new"
INSTANCE_TEMPLATE_BODY = {
"name": TEMPLATE_NAME,
"properties": {
"machine_type": SHORT_MACHINE_TYPE_NAME,
"disks": [
{
"auto_delete": True,
"boot": True,
"device_name": TEMPLATE_NAME,
"initialize_params": {
"disk_size_gb": "10",
"disk_type": "pd-balanced",
"source_image": "projects/debian-cloud/global/images/debian-11-bullseye-v20220621",
},
}
],
"network_interfaces": [{"network": "global/networks/default"}],
},
}
NEW_DESCRIPTION = "Test new description"
INSTANCE_TEMPLATE_BODY_UPDATE = {
"name": NEW_TEMPLATE_NAME,
"description": NEW_DESCRIPTION,
"properties": {"machine_type": "n1-standard-2"},
}
# [END howto_operator_compute_template_copy_args]
# [START howto_operator_compute_igm_update_template_args]
INSTANCE_GROUP_MANAGER_NAME = "instance-group-test"
INSTANCE_GROUP_MANAGER_BODY = {
"name": INSTANCE_GROUP_MANAGER_NAME,
"base_instance_name": INSTANCE_GROUP_MANAGER_NAME,
"instance_template": f"global/instanceTemplates/{TEMPLATE_NAME}",
"target_size": 1,
}
SOURCE_TEMPLATE_URL = (
f"https://www.googleapis.com/compute/beta/projects/{PROJECT_ID}/"
f"global/instanceTemplates/{TEMPLATE_NAME}"
)
DESTINATION_TEMPLATE_URL = (
f"https://www.googleapis.com/compute/beta/projects/{PROJECT_ID}/"
f"global/instanceTemplates/{NEW_TEMPLATE_NAME}"
)
UPDATE_POLICY = {
"type": "OPPORTUNISTIC",
"minimalAction": "RESTART",
"maxSurge": {"fixed": 1},
"minReadySec": 1800,
}
# [END howto_operator_compute_igm_update_template_args]
with models.DAG(
DAG_ID,
schedule="@once",
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example"],
) as dag:
# [START howto_operator_gce_igm_insert_template]
gce_instance_template_insert = ComputeEngineInsertInstanceTemplateOperator(
task_id="gcp_compute_create_template_task",
project_id=PROJECT_ID,
body=INSTANCE_TEMPLATE_BODY,
)
# [END howto_operator_gce_igm_insert_template]
# Added to check for idempotence
# [START howto_operator_gce_igm_insert_template_no_project_id]
gce_instance_template_insert2 = ComputeEngineInsertInstanceTemplateOperator(
task_id="gcp_compute_create_template_task_2",
body=INSTANCE_TEMPLATE_BODY,
)
# [END howto_operator_gce_igm_insert_template_no_project_id]
# [START howto_operator_gce_igm_copy_template]
gce_instance_template_copy = ComputeEngineCopyInstanceTemplateOperator(
task_id="gcp_compute_igm_copy_template_task",
project_id=PROJECT_ID,
resource_id=TEMPLATE_NAME,
body_patch=INSTANCE_TEMPLATE_BODY_UPDATE,
)
# [END howto_operator_gce_igm_copy_template]
# Added to check for idempotence
# [START howto_operator_gce_igm_copy_template_no_project_id]
gce_instance_template_copy2 = ComputeEngineCopyInstanceTemplateOperator(
task_id="gcp_compute_igm_copy_template_task_2",
resource_id=TEMPLATE_NAME,
body_patch=INSTANCE_TEMPLATE_BODY_UPDATE,
)
# [END howto_operator_gce_igm_copy_template_no_project_id]
# [START howto_operator_gce_insert_igm]
gce_igm_insert = ComputeEngineInsertInstanceGroupManagerOperator(
task_id="gcp_compute_create_group_task",
zone=LOCATION,
body=INSTANCE_GROUP_MANAGER_BODY,
project_id=PROJECT_ID,
)
# [END howto_operator_gce_insert_igm]
# Added to check for idempotence
# [START howto_operator_gce_insert_igm_no_project_id]
gce_igm_insert2 = ComputeEngineInsertInstanceGroupManagerOperator(
task_id="gcp_compute_create_group_task_2",
zone=LOCATION,
body=INSTANCE_GROUP_MANAGER_BODY,
)
# [END howto_operator_gce_insert_igm_no_project_id]
# [START howto_operator_gce_igm_update_template]
gce_instance_group_manager_update_template = ComputeEngineInstanceGroupUpdateManagerTemplateOperator(
task_id="gcp_compute_igm_group_manager_update_template",
project_id=PROJECT_ID,
resource_id=INSTANCE_GROUP_MANAGER_NAME,
zone=LOCATION,
source_template=SOURCE_TEMPLATE_URL,
destination_template=DESTINATION_TEMPLATE_URL,
update_policy=UPDATE_POLICY,
)
# [END howto_operator_gce_igm_update_template]
# Added to check for idempotence (and without UPDATE_POLICY)
# [START howto_operator_gce_igm_update_template_no_project_id]
gce_instance_group_manager_update_template2 = ComputeEngineInstanceGroupUpdateManagerTemplateOperator(
task_id="gcp_compute_igm_group_manager_update_template_2",
resource_id=INSTANCE_GROUP_MANAGER_NAME,
zone=LOCATION,
source_template=SOURCE_TEMPLATE_URL,
destination_template=DESTINATION_TEMPLATE_URL,
)
# [END howto_operator_gce_igm_update_template_no_project_id]
# [START howto_operator_gce_delete_old_template_no_project_id]
gce_instance_template_old_delete = ComputeEngineDeleteInstanceTemplateOperator(
task_id="gcp_compute_delete_old_template_task",
resource_id=TEMPLATE_NAME,
)
# [END howto_operator_gce_delete_old_template_no_project_id]
gce_instance_template_old_delete.trigger_rule = TriggerRule.ALL_DONE
# [START howto_operator_gce_delete_new_template_no_project_id]
gce_instance_template_new_delete = ComputeEngineDeleteInstanceTemplateOperator(
task_id="gcp_compute_delete_new_template_task",
resource_id=NEW_TEMPLATE_NAME,
)
# [END howto_operator_gce_delete_new_template_no_project_id]
gce_instance_template_new_delete.trigger_rule = TriggerRule.ALL_DONE
# [START howto_operator_gce_delete_igm_no_project_id]
gce_igm_delete = ComputeEngineDeleteInstanceGroupManagerOperator(
task_id="gcp_compute_delete_group_task",
resource_id=INSTANCE_GROUP_MANAGER_NAME,
zone=LOCATION,
)
# [END howto_operator_gce_delete_igm_no_project_id]
gce_igm_delete.trigger_rule = TriggerRule.ALL_DONE
chain(
gce_instance_template_insert,
gce_instance_template_insert2,
gce_instance_template_copy,
gce_instance_template_copy2,
gce_igm_insert,
gce_igm_insert2,
gce_instance_group_manager_update_template,
gce_instance_group_manager_update_template2,
gce_igm_delete,
gce_instance_template_old_delete,
gce_instance_template_new_delete,
)
# ### Everything below this line is not part of example ###
# ### Just for system tests purpose ###
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "tearDown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
|
3527571d8a56aa7bebf5970a35877cb15c7f75c3
|
4b531117b09c8a60745b61c1b8b7f00f62a16a1c
|
/plugin.video.kaiyan/addon.py
|
48014973fa03868b2ca373565a579ad41952a7ef
|
[
"MIT"
] |
permissive
|
zhengfan2014/xbmc-kodi-private-china-addons
|
ce68c8ba991a5aceefbffccf60d6bb8191cc4af8
|
984856b358da0a1423df7010bb474e1727270429
|
refs/heads/py2
| 2022-12-15T07:16:21.266240
| 2021-08-13T13:12:05
| 2021-08-13T13:12:05
| 244,306,237
| 527
| 90
|
MIT
| 2021-11-30T07:34:33
| 2020-03-02T07:23:14
|
Python
|
UTF-8
|
Python
| false
| false
| 7,410
|
py
|
addon.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
from xbmcswift2 import Plugin
import requests
from bs4 import BeautifulSoup
import xbmcgui
import base64
import json
import urllib2
import sys
import HTMLParser
import re
def get_real_url(url):
rs = requests.get(url,headers=headers,timeout=2)
return rs.url
def unescape(string):
string = urllib2.unquote(string).decode('utf8')
quoted = HTMLParser.HTMLParser().unescape(string).encode('utf-8')
    # convert %u escapes back to Chinese characters
return re.sub(r'%u([a-fA-F0-9]{4}|[a-fA-F0-9]{2})', lambda m: unichr(int(m.group(1), 16)), quoted)
plugin = Plugin()
headers = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
def get_categories():
return [{'name':'推荐','link':'http://baobab.kaiyanapp.com/api/v5/index/tab/allRec?page=0'},
{'name':'周排行','link':'http://baobab.kaiyanapp.com/api/v4/rankList/videos?strategy=weekly&num=8&start='},
{'name':'月排行','link':'http://baobab.kaiyanapp.com/api/v4/rankList/videos?strategy=monthly&num=8&start='},
{'name':'专题','link':'http://baobab.kaiyanapp.com/api/v3/specialTopics?start=0&num=10'}]
def get_tuijian_videos(url):
    # scrapes the video list
videos = []
rec = requests.get(url,headers=headers)
#print(rec.text)
j = json.loads(rec.text)
    # title
#print(j['itemList'][0]['data']['header']['title'] + ' - ' + j['itemList'][0]['data']['header']['subTitle'])
    # number of videos:
#print(j['itemList'][0]['data'])
#dialog = xbmcgui.Dialog()
#ok = dialog.ok('错误提示', j['itemList'][0]['data']['itemList'][0]['data']['content']['data']['playUrl'])
for index in range(len(j['itemList'][0]['data']['itemList'])):
videoitem = {}
videoitem['name'] = j['itemList'][0]['data']['itemList'][index]['data']['content']['data']['title']
videoitem['href'] = j['itemList'][0]['data']['itemList'][index]['data']['content']['data']['playUrl']
videoitem['thumb'] = j['itemList'][0]['data']['itemList'][index]['data']['content']['data']['cover']['feed']
videos.append(videoitem)
        # title
#print(j['itemList'][0]['data']['itemList'][index]['data']['content']['data']['title'])
        # image
#print(j['itemList'][0]['data']['itemList'][index]['data']['content']['data']['cover']['feed'])
#mp4
#print(j['itemList'][0]['data']['itemList'][index]['data']['content']['data']['playUrl'])
for index in range(len(j['itemList'])):
if j['itemList'][index]['type'] == 'videoSmallCard' or j['itemList'][index]['type'] == 'FollowCard':
videoitem = {}
videoitem['name'] = '['+j['itemList'][index]['data']['category']+'] - ' + j['itemList'][index]['data']['title']
videoitem['href'] = j['itemList'][index]['data']['playUrl']
videoitem['thumb'] = j['itemList'][index]['data']['cover']['feed']
videos.append(videoitem)
#print(j['itemList'][index]['data']['category']+'] - ' + j['itemList'][index]['data']['title'])
#print(j['itemList'][index]['data']['cover']['feed'])
#print(j['itemList'][index]['data']['playUrl'])
return videos
def get_paihang_videos(url):
    # scrapes the video list
listnum = 8
num = 0
rank=1
videos = []
while listnum == 8:
rec = requests.get(url + str(num*8),headers=headers)
#print(rec.text)
j = json.loads(rec.text)
listnum = len(j['itemList'])
num += 1
for index in range(len(j['itemList'])):
videoitem = {}
videoitem['name'] = '['+str(rank)+ ']' + j['itemList'][index]['data']['category'] + ' - ' + j['itemList'][index]['data']['title']
videoitem['href'] = j['itemList'][index]['data']['playUrl']
videoitem['thumb'] = j['itemList'][index]['data']['cover']['feed']
videoitem['genre'] = '豆瓣电影'
videos.append(videoitem)
rank += 1
            # title
#print(j['itemList'][index]['data']['category'] + ' - ' + j['itemList'][index]['data']['title'])
            # image
#print(j['itemList'][index]['data']['cover']['feed'])
#mp4
#print(j['itemList'][index]['data']['playUrl'])
return videos
for videoelement in videoelements:
videoitem = {}
videoitem['name'] = videoelement.find('img')['alt']
videoitem['href'] = videoelement.find('a')['href']
#videoitem['thumb'] = 'aaaa'
videoitem['genre'] = '豆瓣电影'
videos.append(videoitem)
return videos
def get_zhuanti_videos(category):
    # scrapes the video list
# if int(page) == 1:
# pageurl = category
# else:
# pageurl = category + 'index_'+page+'.html'
pageurl = category
r = requests.get(pageurl, headers=headers)
r.encoding = 'UTF-8'
soup = BeautifulSoup(r.text, "html.parser")
videos = []
#videoelements = soup.find('ul', id='list1').find_all('li')
#videoelements = contenter.find_all("a", attrs={"data-original": True})
videoelements = soup.find_all('li',class_='rank-item')
if videoelements is None:
dialog = xbmcgui.Dialog()
ok = dialog.ok('错误提示', '没有播放源')
else:
#dialog = xbmcgui.Dialog()
#sss = str(len(videoelements))
#ok = dialog.ok('video数量', sss)
for videoelement in videoelements:
videoitem = {}
videoitem['name'] = videoelement.find('img')['alt']
videoitem['href'] = videoelement.find('a')['href']
#videoitem['thumb'] = 'aaaa'
videoitem['genre'] = '豆瓣电影'
videos.append(videoitem)
return videos
@plugin.route('/play/<name>/<url>/')
def play(name,url):
items = []
item = {'label': name,'path': get_real_url(url),'is_playable': True}
items.append(item)
return items
@plugin.route('/category/<name>/<url>/')
def category(name,url):
#dialog = xbmcgui.Dialog()
#ok = dialog.ok('错误提示', url)
if name == '推荐':
#tuijian
videos = get_tuijian_videos(url)
else:
if name == '周排行':
#2
videos = get_paihang_videos(url)
else:
if name == '月排行':
#3
videos = get_paihang_videos(url)
else:
#4
videos = get_videos(url)
items = [{
'label': video['name'],
'path': plugin.url_for('play', name='最高清晰度 - 本地解析' , url=video['href']),
'thumbnail': video['thumb'],
'icon': video['thumb'],
} for video in videos]
sorted_items = items
#sorted_items = sorted(items, key=lambda item: item['label'])
return sorted_items
@plugin.route('/')
def index():
categories = get_categories()
items = [{
'label': category['name'],
'path': plugin.url_for('category', name=category['name'] , url=category['link']),
} for category in categories]
return items
@plugin.route('/labels/<label>/')
def show_label(label):
    # write the method that scrapes the video list here
#
items = [
{'label': label},
]
return items
if __name__ == '__main__':
plugin.run()
|
497f198069361e99344c231c76b64b6fabf22629
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-VideoSubscriberAccount/PyObjCTest/test_vssubscription.py
|
4ad2ba6b60e3c7f7d2e1994fbd32cf3541573c76
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
test_vssubscription.py
|
from PyObjCTools.TestSupport import TestCase
import VideoSubscriberAccount
class TestVSSubscription(TestCase):
def test_enum_types(self):
self.assertIsEnumType(VideoSubscriberAccount.VSSubscriptionAccessLevel)
def testConstants(self):
self.assertEqual(VideoSubscriberAccount.VSSubscriptionAccessLevelUnknown, 0)
self.assertEqual(
VideoSubscriberAccount.VSSubscriptionAccessLevelFreeWithAccount, 1
)
self.assertEqual(VideoSubscriberAccount.VSSubscriptionAccessLevelPaid, 2)
def testClasses(self):
VideoSubscriberAccount.VSSubscription
|
be1c21ee7b686f9fc44ded1b3ee9085085a74e50
|
d037fecd7d32a97d6e38d27dbbf574bbd825054a
|
/homu/comments.py
|
7a1c97965ef6d46fb6845d79282580ca6e1df372
|
[
"MIT"
] |
permissive
|
rust-lang/homu
|
e83b164ffb0b76e3d03e41a3b7d67f20ce2f6d00
|
c6926e6c10bd9725b617777a3d45ae57d21b96ed
|
refs/heads/master
| 2023-09-01T04:03:08.849152
| 2023-06-11T13:46:18
| 2023-06-11T13:46:18
| 127,407,146
| 148
| 63
|
MIT
| 2023-09-03T15:40:02
| 2018-03-30T09:06:01
|
Python
|
UTF-8
|
Python
| false
| false
| 5,132
|
py
|
comments.py
|
import json
class Comment:
def __init__(self, **args):
if len(args) != len(self.params):
raise KeyError("different number of params")
for key, value in args.items():
if key in self.params:
setattr(self, key, value)
else:
raise KeyError("unknown attribute: %s" % key)
def jsonify(self):
out = {"type": self.__class__.__name__}
for param in self.params:
out[param] = getattr(self, param)
return json.dumps(out, separators=(',', ':'))
class Approved(Comment):
def __init__(self, bot=None, **args):
# Because homu needs to leave a comment for itself to kick off a build,
# we need to know the correct botname to use. However, we don't want to
# save that botname in our state JSON. So we need a custom constructor
# to grab the botname and delegate the rest of the keyword args to the
# Comment constructor.
super().__init__(**args)
self.bot = bot
params = ["sha", "approver", "queue"]
def render(self):
# The comment here is required because Homu wants a full, unambiguous,
# pinned commit hash to kick off the build, and this note-to-self is
# how it gets it. This is to safeguard against situations where Homu
# reloads and another commit has been pushed since the approval.
message = ":pushpin: Commit {sha} has been " + \
"approved by `{approver}`\n\n" + \
"It is now in the [queue]({queue}) for this repository.\n\n" + \
"<!-- @{bot} r={approver} {sha} -->"
return message.format(
sha=self.sha,
approver=self.approver,
bot=self.bot,
queue=self.queue
)
class ApprovalIgnoredWip(Comment):
def __init__(self, wip_keyword=None, **args):
# We want to use the wip keyword in the message, but not in the json
# blob.
super().__init__(**args)
self.wip_keyword = wip_keyword
params = ["sha"]
def render(self):
message = ':clipboard:' + \
' Looks like this PR is still in progress,' + \
' ignoring approval.\n\n' + \
'Hint: Remove **{wip_keyword}** from this PR\'s title when' + \
' it is ready for review.'
return message.format(wip_keyword=self.wip_keyword)
class Delegated(Comment):
def __init__(self, bot=None, **args):
# Because homu needs to leave a comment for the delegated person,
# we need to know the correct botname to use. However, we don't want to
# save that botname in our state JSON. So we need a custom constructor
# to grab the botname and delegate the rest of the keyword args to the
# Comment constructor.
super().__init__(**args)
self.bot = bot
params = ["delegator", "delegate"]
def render(self):
message = \
':v: @{delegate}, you can now approve this pull request!\n\n' + \
'If @{delegator} told you to "`r=me`" after making some ' + \
'further change, please make that change, then do ' + \
'`@{bot} r=@{delegator}`'
return message.format(
delegate=self.delegate,
bot=self.bot,
delegator=self.delegator
)
class BuildStarted(Comment):
params = ["head_sha", "merge_sha"]
def render(self):
return ":hourglass: Testing commit %s with merge %s..." % (
self.head_sha, self.merge_sha,
)
class TryBuildStarted(Comment):
params = ["head_sha", "merge_sha"]
def render(self):
return ":hourglass: Trying commit %s with merge %s..." % (
self.head_sha, self.merge_sha,
)
class BuildCompleted(Comment):
params = ["approved_by", "base_ref", "builders", "merge_sha"]
def render(self):
urls = ", ".join(
"[%s](%s)" % kv for kv in sorted(self.builders.items())
)
return (
":sunny: Test successful - %s\n"
"Approved by: %s\n"
"Pushing %s to %s..."
% (
urls, self.approved_by, self.merge_sha, self.base_ref,
)
)
class TryBuildCompleted(Comment):
params = ["builders", "merge_sha"]
def render(self):
urls = ", ".join(
"[%s](%s)" % kv for kv in sorted(self.builders.items())
)
return ":sunny: Try build successful - %s\nBuild commit: %s (`%s`)" % (
urls, self.merge_sha, self.merge_sha,
)
class BuildFailed(Comment):
params = ["builder_url", "builder_name"]
def render(self):
return ":broken_heart: Test failed - [%s](%s)" % (
self.builder_name, self.builder_url
)
class TryBuildFailed(Comment):
params = ["builder_url", "builder_name"]
def render(self):
return ":broken_heart: Test failed - [%s](%s)" % (
self.builder_name, self.builder_url
)
class TimedOut(Comment):
params = []
def render(self):
return ":boom: Test timed out"
|
805491572083fae86456a0fddb330da09180703c
|
e92b076a101aad48406de701400cfa230e66c313
|
/pyrep/robots/robot_component.py
|
7eab82425f8b3ef1153ac63ef56d50cd632f1ffd
|
[
"MIT"
] |
permissive
|
stepjam/PyRep
|
16b16647943586369b37a88a48de6477f3e8901f
|
5837640a7d9fa41c3d49db22d6b24e3a291b5048
|
refs/heads/master
| 2023-08-31T11:36:27.521442
| 2023-08-22T16:55:14
| 2023-08-22T16:55:14
| 193,898,092
| 619
| 198
|
MIT
| 2023-08-10T04:23:50
| 2019-06-26T12:15:58
|
Python
|
UTF-8
|
Python
| false
| false
| 10,392
|
py
|
robot_component.py
|
from typing import List, Tuple
from pyrep.objects.shape import Shape
from pyrep.backend import sim, utils
from pyrep.const import JointType
from pyrep.objects.object import Object
from pyrep.objects.joint import Joint
from pyrep.const import ObjectType, JointMode
class RobotComponent(Object):
"""Collection of joints representing arms, end effectors, mobile bases, etc.
"""
def __init__(self, count: int, name: str, joint_names: List[str],
base_name: str = None):
suffix = '' if count == 0 else '#%d' % (count - 1)
super().__init__(
name + suffix if base_name is None else base_name + suffix)
self._num_joints = len(joint_names)
# Joint handles
self.joints = [Joint(jname + suffix)
for jname in joint_names]
self._joint_handles = [j.get_handle() for j in self.joints]
def copy(self) -> 'RobotComponent':
"""Copy and pastes the arm in the scene.
The arm is copied together with all its associated calculation
objects and associated scripts.
:return: The new pasted arm.
"""
# Copy whole model
handle = sim.simCopyPasteObjects([self._handle], 1)[0]
name = sim.simGetObjectName(handle)
# Find the number of this arm
num = name[name.rfind('#') + 1:]
if len(num) > 0:
num = int(num) + 1
else:
num = 0
# FIXME: Pass valid name and joint_names.
return self.__class__(num) # type: ignore
def _get_requested_type(self) -> ObjectType:
"""Gets the type of the object.
:return: Type of the object.
"""
return ObjectType(sim.simGetObjectType(self.get_handle()))
def get_joint_count(self) -> int:
"""Gets the number of joints in this component.
:return: The number of joints.
"""
return self._num_joints
def get_joint_types(self) -> List[JointType]:
"""Retrieves the type of the joints in this component.
:return: A list containing the types of the joints.
"""
return [j.get_joint_type() for j in self.joints]
def get_joint_positions(self) -> List[float]:
"""Retrieves the intrinsic position of the joints.
See :py:meth:`Joint.get_joint_position` for more information.
:return: A list of intrinsic position of the joints.
"""
return [j.get_joint_position() for j in self.joints]
def set_joint_positions(self, positions: List[float],
disable_dynamics: bool = False) -> None:
"""Sets the intrinsic position of the joints.
See :py:meth:`Joint.set_joint_position` for more information.
:param disable_dynamics: If True, then the position can be set even
when the joint mode is in Force mode. It will disable dynamics,
move the joint, and then re-enable dynamics.
:param positions: A list of positions of the joints (angular or linear
values depending on the joint type).
"""
self._assert_len(positions)
if not disable_dynamics:
[sim.simSetJointPosition(jh, p) # type: ignore
for jh, p in zip(self._joint_handles, positions)]
return
is_model = self.is_model()
if not is_model:
self.set_model(True)
prior = sim.simGetModelProperty(self.get_handle())
p = prior | sim.sim_modelproperty_not_dynamic
# Disable the dynamics
sim.simSetModelProperty(self._handle, p)
with utils.step_lock:
sim.simExtStep(True) # Have to step for changes to take effect
[sim.simSetJointPosition(jh, p) # type: ignore
for jh, p in zip(self._joint_handles, positions)]
[j.set_joint_target_position(p) # type: ignore
for j, p in zip(self.joints, positions)]
with utils.step_lock:
sim.simExtStep(True) # Have to step for changes to take effect
# Re-enable the dynamics
sim.simSetModelProperty(self._handle, prior)
self.set_model(is_model)
def get_joint_target_positions(self) -> List[float]:
"""Retrieves the target positions of the joints.
:return: A list of target position of the joints (angular or linear
values depending on the joint type).
"""
return [j.get_joint_target_position() for j in self.joints]
def set_joint_target_positions(self, positions: List[float]) -> None:
"""Sets the target positions of the joints.
See :py:meth:`Joint.set_joint_target_position` for more information.
:param positions: List of target position of the joints (angular or
linear values depending on the joint type).
"""
self._assert_len(positions)
[j.set_joint_target_position(p) # type: ignore
for j, p in zip(self.joints, positions)]
def get_joint_target_velocities(self) -> List[float]:
"""Retrieves the intrinsic target velocities of the joints.
:return: List of the target velocity of the joints (linear or angular
velocity depending on the joint-type).
"""
return [j.get_joint_target_velocity() # type: ignore
for j in self.joints]
def set_joint_target_velocities(self, velocities: List[float]) -> None:
"""Sets the intrinsic target velocities of the joints.
:param velocities: List of the target velocity of the joints (linear
or angular velocities depending on the joint-type).
"""
self._assert_len(velocities)
[j.set_joint_target_velocity(v) # type: ignore
for j, v in zip(self.joints, velocities)]
def get_joint_forces(self) -> List[float]:
"""Retrieves the forces or torques of the joints.
See :py:meth:`Joint.get_joint_force` for more information.
:return: A list of the forces or the torques applied to the joints
along/about their z-axis.
"""
return [j.get_joint_force() for j in self.joints]
def set_joint_forces(self, forces: List[float]) -> None:
"""Sets the maximum force or torque that the joints can exert.
See :py:meth:`Joint.set_joint_force` for more information.
:param forces: The maximum force or torque that the joints can exert.
These cannot be negative values.
"""
self._assert_len(forces)
[j.set_joint_force(f) # type: ignore
for j, f in zip(self.joints, forces)]
def get_joint_velocities(self) -> List[float]:
"""Get the current joint velocities.
:return: List containing the velocities of the joints (linear or
angular velocities depending on the joint-type).
"""
return [j.get_joint_velocity() for j in self.joints]
def get_joint_intervals(self) -> Tuple[List[bool], List[List[float]]]:
"""Retrieves the interval parameters of the joints.
See :py:meth:`Joint.get_joint_interval` for more information.
:return: A tuple containing a list of bools indicates whether the joint
is cyclic (the joint varies between -pi and +pi in a cyclic manner),
and a 2D list containing the interval of the joints.
"""
cyclics, intervals = [], []
for j in self.joints:
c, i = j.get_joint_interval()
cyclics.append(c)
intervals.append(i)
return cyclics, intervals
def set_joint_intervals(self, cyclic: List[bool],
intervals: List[List[float]]) -> None:
"""Sets the interval parameters of the joints (i.e. range values).
See :py:meth:`Joint.set_joint_interval` for more information.
:param cyclic: List of bools indicates whether the joint is cyclic.
Only revolute joints with a pitch of 0 can be cyclic.
:param intervals: 2D list containing the intervals of the joints.
"""
self._assert_len(cyclic)
self._assert_len(intervals)
[j.set_joint_interval(c, i) # type: ignore
for j, c, i in zip(self.joints, cyclic, intervals)]
def get_joint_upper_velocity_limits(self) -> List[float]:
"""Gets upper velocity limits of the joints.
:return: List of the upper velocity limits.
"""
return [j.get_joint_upper_velocity_limit() for j in self.joints]
def set_control_loop_enabled(self, value: bool) -> None:
"""Sets whether the control loop is enable for all joints.
:param value: The new value for the control loop state.
"""
[j.set_control_loop_enabled(value) # type: ignore
for j in self.joints]
def set_motor_locked_at_zero_velocity(self, value: bool) -> None:
"""Sets if motor is locked when target velocity is zero for all joints.
When enabled in velocity mode and its target velocity is zero, then the
joint is locked in place.
:param value: If the motors should be locked at zero velocity.
"""
[j.set_motor_locked_at_zero_velocity(value) # type: ignore
for j in self.joints]
def set_joint_mode(self, value: JointMode) -> None:
"""Sets the operation mode of the joint group.
:param value: The new joint mode value.
"""
[j.set_joint_mode(value) # type: ignore
for j in self.joints]
def get_joint_modes(self) -> List[JointMode]:
"""Gets the operation mode of the joint group.
:return: A list of joint modes.
"""
return [j.get_joint_mode() for j in self.joints]
def get_visuals(self) -> List[Object]:
"""Gets a list of the visual elements of this component.
Can be useful for methods such as domain randomization.
Should ideally be overridden for each robot.
:return: A list of visual shapes.
"""
tree = self.get_objects_in_tree(ObjectType.SHAPE, exclude_base=False)
return [obj for obj in tree if 'visual' in obj.get_name()]
def _assert_len(self, inputs: list) -> None:
if len(self.joints) != len(inputs):
raise RuntimeError(
'Tried to set values for %d joints, but joint group consists '
'of %d joints.' % (len(inputs), len(self.joints)))
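# Illustrative sketch (not part of the original file): reading and setting
# joint positions through a RobotComponent. The robot and joint names are
# assumptions for illustration only, and PyRep must already be launched with
# a matching CoppeliaSim scene for this to work.
def _example_joint_roundtrip():
    arm = RobotComponent(
        count=0, name='MyArm',
        joint_names=['MyArm_joint1', 'MyArm_joint2', 'MyArm_joint3'])  # hypothetical names
    current = arm.get_joint_positions()
    # Nudge every joint slightly, bypassing dynamics so the change is immediate.
    arm.set_joint_positions([p + 0.05 for p in current], disable_dynamics=True)
    return arm.get_joint_positions()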
|
b00eac21fd4413883800c1e7449f02f5bd56755d
|
bc64ff33e398fea0d217fb96bf1a861bf68032c8
|
/Tutorial Deploy to OpenCV dnn/aid_cv2_dnn.py
|
36362693bf9a60e574627a93aee50d2300be9242
|
[
"BSD-2-Clause"
] |
permissive
|
maikherbig/AIDeveloper
|
6c37ab6f7abdfd82f33a1717a23dac81332a64c1
|
66ade20df6b0b4f607e43f2e6661ab6d036afe3f
|
refs/heads/master
| 2023-02-20T15:20:22.385575
| 2023-02-06T09:47:40
| 2023-02-06T09:47:40
| 209,036,440
| 109
| 21
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,982
|
py
|
aid_cv2_dnn.py
|
import numpy as np
import pandas as pd
import cv2
from scipy import ndimage
#Run inference (using OpenCV's dnn module) on images using your model trained
#in AIDeveloper.
#Script contains functions:
#- to pre-preprocess images according to the input criteria of a loaded model
#- forward images through a model and get output (predictions)
def image_adjust_channels(images,target_channels=1):
"""
Check the number of channels of images.
Transform images (if needed) to get to the desired number of channels
Parameters
----------
images: numpy array of dimension (nr.images,height,width) for grayscale,
or of dimension (nr.images,height,width,channels) for RGB images
target_channels: int
target number of channels
can be one of the following:
- 1: target is a grayscale image. In case RGB images are
provided, the luminosity formula is used to convert RGB to
grayscale
- 3: target is an RGB image. In case grayscale images are provided,
the information of each image is copied to all three channels to
convert grayscale to RGB"
Returns
----------
images: numpy array
images with adjusted number of channels
"""
#images.shape is (N,H,W) for grayscale, or (N,H,W,C) for RGB images
#(N,H,W,C) means (nr.images,height,width,channels)
#Make sure either (N,H,W), or (N,H,W,C) is provided
assert len(images.shape)==4 or len(images.shape)==3, \
    "Shape of 'images' is not supported: " + str(images.shape)
if len(images.shape)==4:#Provided images are RGB
#Make sure there are 3 channels (RGB)
assert images.shape[-1]==3, "Images have "+str(images.shape[-1])+" channels. This is (currently) not supported!"
if target_channels==1:#User wants Grayscale -> use the luminosity formula
images = (0.21 * images[:,:,:,:1]) + (0.72 * images[:,:,:,1:2]) + (0.07 * images[:,:,:,-1:])
images = images[:,:,:,0]
images = images.astype(np.uint8)
print("Used luminosity formula to convert RGB to Grayscale")
if len(images.shape)==3:#Provided images are Grayscale
if target_channels==3:#User wants RGB -> copy the information to all 3 channels
images = np.stack((images,)*3, axis=-1)
print("Copied information to all three channels to convert Grayscale to RGB")
return images
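# Illustrative sketch (not part of the original script): converting a small
# stack of random grayscale images to RGB and back with image_adjust_channels.
# The array sizes are arbitrary and only serve as an example.
def _example_adjust_channels():
    gray = np.random.randint(0, 255, size=(4, 64, 64), dtype=np.uint8)  # (N,H,W)
    rgb = image_adjust_channels(gray, target_channels=3)                # (N,H,W,3)
    back = image_adjust_channels(rgb, target_channels=1)                # (N,H,W)
    return rgb.shape, back.shape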
def image_crop_pad_np(images,pos_x,pos_y,final_h,final_w,padding_mode='constant'):
"""
Deprecated: Please use 'image_crop_pad_cv2' instead, which uses OpenCV instead of numpy.
Function takes a list of images (list of numpy arrays) and resizes them to
equal size by center cropping and/or padding.
Parameters
----------
images: list of images of arbitrary shape
(nr.images,height,width,channels)
can be a single image or multiple images
pos_x: float or ndarray of length N
The x coordinate(s) of the centroid of the event(s) [um]
pos_y: float or ndarray of length N
The y coordinate(s) of the centroid of the event(s) [um]
final_h: int
target image height [pixels]
final_w: int
target image width [pixels]
padding_mode: str
Perform the following padding operation if the cell is too far at the
border of the image such that the desired image size cannot be
obtained without going beyond the border of the image:
- Delete: Return empty array (all zero) if the cell is too far at border (delete image)
#the following text is copied from
https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html
- constant (default): Pads with a constant value.
- edge: Pads with the edge values of array.
- linear_ramp: Pads with the linear ramp between end_value and the array edge value.
- maximum: Pads with the maximum value of all or part of the vector along each axis.
- mean: Pads with the mean value of all or part of the vector along each axis.
- median: Pads with the median value of all or part of the vector along each axis.
- minimum: Pads with the minimum value of all or part of the vector along each axis.
- reflect: Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis.
- symmetric: Pads with the reflection of the vector mirrored along the edge of the array.
- wrap: Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning.
Returns
----------
images: list of images. Each image is a numpy array of shape
(final_h,final_w,channels)
"""
print("Deprecated: Please use 'image_crop_pad_cv' instead, which uses OpenCV instead of numpy.")
for i in range(len(images)):
image = images[i]
#Compute the edge-coordinates that define the cropped image
y1 = np.around(pos_y[i]-final_h/2.0)
x1 = np.around(pos_x[i]-final_w/2.0)
y2 = y1+final_h
x2 = x1+final_w
#Are these coordinates within the original image?
#If not, the image needs padding
pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0
if y1<0:#Padding is required on top of image
pad_top = int(abs(y1))
y1 = 0 #set y1 to zero and pad pixels after cropping
if y2>image.shape[0]:#Padding is required on bottom of image
pad_bottom = int(y2-image.shape[0])
y2 = image.shape[0]
if x1<0:#Padding is required on left of image
pad_left = int(abs(x1))
x1 = 0
if x2>image.shape[1]:#Padding is required on right of image
pad_right = int(x2-image.shape[1])
x2 = image.shape[1]
#Get cropped image
temp = image[int(y1):int(y2),int(x1):int(x2)]
if pad_top+pad_bottom+pad_left+pad_right>0:
if padding_mode=="Delete":
temp = np.zeros_like(temp)
else:
#Perform all padding operations in one go
temp = np.pad(temp,pad_width=( (pad_top, pad_bottom),(pad_left, pad_right) ),mode=padding_mode)
images[i] = temp
return images
def image_crop_pad_cv2(images,pos_x,pos_y,pix,final_h,final_w,padding_mode="cv2.BORDER_CONSTANT"):
"""
Function takes a list of images (list of numpy arrays) and resizes them to
equal size by center cropping and/or padding.
Parameters
----------
images: list of images of arbitrary shape
(nr.images,height,width,channels)
can be a single image or multiple images
pos_x: float or ndarray of length N
The x coordinate(s) of the centroid of the event(s) [um]
pos_y: float or ndarray of length N
The y coordinate(s) of the centroid of the event(s) [um]
final_h: int
target image height [pixels]
final_w: int
target image width [pixels]
padding_mode: str; OpenCV BorderType
Perform the following padding operation if the cell is too far at the
border such that the desired image size cannot be
obtained without going beyond the border of the image:
- "Delete": Return empty array (all zero) if the cell is too far at border (delete image)
#the following text is copied from
https://docs.opencv.org/3.4/d2/de8/group__core__array.html#ga209f2f4869e304c82d07739337eae7c5
- "cv2.BORDER_CONSTANT": iiiiii|abcdefgh|iiiiiii with some specified i
- "cv2.BORDER_REFLECT": fedcba|abcdefgh|hgfedcb
- "cv2.BORDER_REFLECT_101": gfedcb|abcdefgh|gfedcba
- "cv2.BORDER_DEFAULT": same as BORDER_REFLECT_101
- "cv2.BORDER_REPLICATE": aaaaaa|abcdefgh|hhhhhhh
- "cv2.BORDER_WRAP": cdefgh|abcdefgh|abcdefg
Returns
----------
images: list of images. Each image is a numpy array of shape
(final_h,final_w,channels)
"""
#Convert position of cell from "um" to "pixel index"
pos_x,pos_y = pos_x/pix,pos_y/pix
for i in range(len(images)):
image = images[i]
#Compute the edge-coordinates that define the cropped image
y1 = np.around(pos_y[i]-final_h/2.0)
x1 = np.around(pos_x[i]-final_w/2.0)
y2 = y1+final_h
x2 = x1+final_w
#Are these coordinates within the original image?
#If not, the image needs padding
pad_top,pad_bottom,pad_left,pad_right = 0,0,0,0
if y1<0:#Padding is required on top of image
pad_top = int(abs(y1))
y1 = 0 #set y1 to zero and pad pixels after cropping
if y2>image.shape[0]:#Padding is required on bottom of image
pad_bottom = int(y2-image.shape[0])
y2 = image.shape[0]
if x1<0:#Padding is required on left of image
pad_left = int(abs(x1))
x1 = 0
if x2>image.shape[1]:#Padding is required on right of image
pad_right = int(x2-image.shape[1])
x2 = image.shape[1]
#Crop the image
temp = image[int(y1):int(y2),int(x1):int(x2)]
if pad_top+pad_bottom+pad_left+pad_right>0:
if padding_mode=="Delete":
temp = np.zeros_like(temp)
else:
#Perform all padding operations in one go
temp = cv2.copyMakeBorder(temp, pad_top, pad_bottom, pad_left, pad_right, eval(padding_mode))
images[i] = temp
return images
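# Illustrative sketch (not part of the original script): center-cropping a
# single random image around a centroid given in micrometers. The pixel size
# (pix) and the centroid values are made up for the example.
def _example_crop_pad():
    img = np.random.randint(0, 255, size=(80, 250), dtype=np.uint8)
    pos_x = np.array([42.5])   # centroid x in um (125 px * 0.34 um/px)
    pos_y = np.array([13.6])   # centroid y in um (40 px * 0.34 um/px)
    cropped = image_crop_pad_cv2(
        images=[img], pos_x=pos_x, pos_y=pos_y, pix=0.34,
        final_h=64, final_w=64, padding_mode="cv2.BORDER_CONSTANT")
    return cropped[0].shape  # (64, 64)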
def image_zooming(images,zoom_factor,zoom_interpol_method):
"""
Function takes a list of images (list of numpy arrays) and resizes them to
an equal size by scaling (interpolation).
Parameters
----------
images: list of images of arbitrary shape
zoom_factor: float
Factor by which the size of the images should be zoomed
zoom_interpol_method: str; available are: (text copied from original docs: https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#resize)
-"cv2.INTER_NEAREST" – a nearest-neighbor interpolation
-"cv2.INTER_LINEAR" – a bilinear interpolation (used by default)
-"cv2.INTER_AREA" – resampling using pixel area relation. It may be a preferred method for image decimation, as it gives moire’-free results. But when the image is zoomed, it is similar to the INTER_NEAREST method.
-"cv2.INTER_CUBIC" - a bicubic interpolation over 4x4 pixel neighborhood
-"cv2.INTER_LANCZOS4" - a Lanczos interpolation over 8x8 pixel neighborhood
Returns
----------
list of images of arbitrary shape
"""
#final_h = int(np.around(zoom_factor*images[0].shape[1]))
#final_w = int(np.around(zoom_factor*images[0].shape[2]))
for i in range(len(images)):
#the order (width,height) in cv2.resize is not an error. OpenCV wants this order...
#images[i] = cv2.resize(images[i], dsize=(final_w,final_h), interpolation=eval(zoom_interpol_method))
images[i] = cv2.resize(images[i], dsize=None,fx=zoom_factor, fy=zoom_factor, interpolation=eval(zoom_interpol_method))
return images
def image_normalization(images,normalization_method,mean_trainingdata=None,std_trainingdata=None):
"""
Perform a normalization of the pixel values.
Parameters
----------
images: ndarray
normalization_method: str
    Method used to normalize the pixel values of the images. Available options:
-"None" – No normalization is applied.
-"Div. by 255" – Each input image is divided by 255 (useful since pixel
values go from 0 to 255, so the result will be in range 0-1)
-"StdScaling using mean and std of each image individually" – The mean
and standard deviation of each input image itself is used to scale it
by first subtracting the mean and then dividing by the standard deviation
-"StdScaling using mean and std of all training data" - During model
training, the mean and std of the entire training set was determined.
This mean and standard deviation is used to normalize images by first
subtracting the mean and then dividing by the standard deviation
mean_trainingdata: float; the mean pixel value obtained from the training dataset
std_trainingdata: float; the std of the pixel values obtained from the training dataset
Returns
----------
ndarray of images
"""
if normalization_method == "StdScaling using mean and std of all training data":
#make sure pandas series is converted to numpy array
if type(mean_trainingdata)==pd.core.series.Series:
mean_trainingdata=mean_trainingdata.values[0]
std_trainingdata=std_trainingdata.values[0]
if np.allclose(std_trainingdata,0):
std_trainingdata = 0.0001
print("Set the standard deviation (std_trainingdata) to 0.0001 because otherwise div. by 0 would have happend!")
if len(images.shape)==3: #single channel Grayscale rtdc data
#Add the "channels" dimension
images = np.expand_dims(images,3)
images = images.astype(np.float32)
for k in range(images.shape[0]):
line = images[k,:,:,:]
###########Scaling############
if normalization_method == "None":
pass #dont do anything
elif normalization_method == "Div. by 255":
line = line/255.0
elif normalization_method == "StdScaling using mean and std of each image individually":
mean = np.mean(line)
std = np.std(line)
if np.allclose(std,0):
std = 0.0001
print("Set the standard deviation to 0.0001 because otherwise div. by 0 would have happend!")
line = (line-mean)/std
elif normalization_method == "StdScaling using mean and std of all training data":
line = (line-mean_trainingdata)/std_trainingdata
#Under NO circumstances, training data should contain nan values
ind = np.isnan(line)
line[ind] = np.random.random() #replace nan with random values. This is better than nan, since .fit will collapse and never get back
images[k,:,:,:] = line
return images
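# Illustrative sketch (not part of the original script): the default
# "Div. by 255" normalization maps uint8 pixel values into the 0-1 range and
# returns a float32 array with an added channel dimension for grayscale input.
def _example_normalization():
    imgs = np.random.randint(0, 255, size=(2, 32, 32), dtype=np.uint8)
    normed = image_normalization(imgs, normalization_method="Div. by 255")
    return normed.dtype, float(normed.max())  # float32, < 1.0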
def image_preprocessing(images,pos_x,pos_y,pix=0.34,target_imsize=32,
target_channels=1,zoom_factor=1,
zoom_interpol_method="cv2.INTER_NEAREST",
padding_mode="cv2.BORDER_CONSTANT",
normalization_method="Div. by 255",
mean_trainingdata=None,std_trainingdata=None):
"""
Wrapper function which performs all image preprocessing steps required
for preparing raw rtdc images to be forwarded through a neural net.
Parameters
----------
images: numpy array of shape (nr.images,height,width,channels)
can be a single image or multiple images
pos_x: float or ndarray of length N
The x coordinate(s) of the centroid of the event(s) [um]
pos_y: float or ndarray of length N
The y coordinate(s) of the centroid of the event(s) [um]
pix: float
Resolution [µm/pix]
target_imsize: int
target image size (in pixels)
currently, only squared images are supported. Hence, width=height
target_channels: int
Indicates the number of channels of the images
can be one of the following:
- 1: model expects grayscale images
- 3: model expects RGB (color) images
zoom_factor: float
Factor by which the size of the images should be zoomed
zoom_interpol_method: str; OpenCV interpolation flag
can be one of the following methods:
- "cv2.INTER_NEAREST": a nearest-neighbor interpolation
- "cv2.INTER_LINEAR": a bilinear interpolation (used by default)
- "cv2.INTER_AREA": resampling using pixel area relation. It may be a
preferred method for image decimation, as it gives moire’-free results.
But when the image is zoomed, it is similar to the INTER_NEAREST method.
- "cv2.INTER_CUBIC": a bicubic interpolation over 4×4 pixel neighborhood
- "cv2.INTER_LANCZOS4": a Lanczos interpolation over 8×8 pixel neighborhood
padding_mode: str; OpenCV BorderType
Perform the following padding operation if the cell is too far at the
border of the image such that the desired image size cannot be
obtained without going beyond the order of the image:
- "Delete": Return empty array (all zero) if the cell is too far at border (delete image)
#the following text is copied from
https://docs.opencv.org/3.4/d2/de8/group__core__array.html#ga209f2f4869e304c82d07739337eae7c5
- "cv2.BORDER_CONSTANT": iiiiii|abcdefgh|iiiiiii with some specified i
- "cv2.BORDER_REFLECT": fedcba|abcdefgh|hgfedcb
- "cv2.BORDER_REFLECT_101": gfedcb|abcdefgh|gfedcba
- "cv2.BORDER_DEFAULT": same as BORDER_REFLECT_101
- "cv2.BORDER_REPLICATE": aaaaaa|abcdefgh|hhhhhhh
- "cv2.BORDER_WRAP": cdefgh|abcdefgh|abcdefg
normalization_method: str
Define a method to normalize the pixel-values of the images.
can be one of the following methods:
- 'None': No normalization is applied
- 'Div. by 255': each pixel value is divided by 255 (default)
- 'StdScaling using mean and std of each image individually': The mean and
standard deviation of each input image itself is used to scale it by
first subtracting the mean and then dividing by the standard deviation.
- 'StdScaling using mean and std of all training data': A mean and std.
value was obtained while fitting the neural net by averaging the entire
training dataset. These fixed values are used to scale images during
training by first subtracting the mean and then dividing by the
standard deviation.
mean_trainingdata: float; the mean pixel value obtained from the training dataset
std_trainingdata: float; the std of the pixel values obtained from the training dataset
"""
#Adjust number of channels
images = image_adjust_channels(images,target_channels)
#Convert image array to list
images = list(images)
#Apply zooming operation if required
if zoom_factor!=1:
images = image_zooming(images,zoom_factor,zoom_interpol_method)
#Adjust pos_x and pos_y accordingly
pos_x,pos_y = zoom_factor*pos_x,zoom_factor*pos_y
#Cropping and padding operation to obtain images of desired size
images = image_crop_pad_cv2(images=images,pos_x=pos_x,pos_y=pos_y,pix=pix,final_h=target_imsize,final_w=target_imsize,padding_mode=padding_mode)
#Convert to unit8 arrays
images = np.array((images), dtype="uint8")
if target_channels==1:#User wants Grayscale -> add empty channel dimension (required for inference in OpenCV dnn)
images = np.expand_dims(images,3)
#Normalize images
images = image_normalization(images,normalization_method="Div. by 255",mean_trainingdata=None,std_trainingdata=None)
return images
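# Illustrative sketch (not part of the original script): running the full
# preprocessing pipeline on dummy data. The image sizes, centroids and pixel
# size are assumptions chosen only so that the shapes work out.
def _example_preprocessing():
    imgs = np.random.randint(0, 255, size=(5, 80, 250), dtype=np.uint8)
    pos_x = np.full(5, 42.5)  # centroid x coordinates in um
    pos_y = np.full(5, 13.6)  # centroid y coordinates in um
    batch = image_preprocessing(imgs, pos_x=pos_x, pos_y=pos_y, pix=0.34,
                                target_imsize=32, target_channels=1)
    return batch.shape  # (5, 32, 32, 1)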
def load_model_meta(meta_path):
"""
Extract meta information from a meta file that was created during training
in AID. The function returns all the information on how images need to be
preprocessed before passing them through the neural net.
Parameters
----------
meta_path: str; path to a meta file (generated by AID during model training)
Returns
----------
pd.DataFrame ; A DataFrame with the following keys:
target_imsize: input image size required by the neural net
target_channels: number of image channels required by the neural net
normalization_method: the method to normalize the pixel-values of the images
mean_trainingdata: the mean pixel value obtained from the training dataset
std_trainingdata: the std of the pixel values obtained from the training dataset
zoom_factor: factor by which the size of the images should be zoomed
zoom_interpol_method: OpenCV interpolation flag
padding_mode: OpenCV borderType flag
"""
xlsx = pd.ExcelFile(meta_path)
#The zooming factor is saved in the UsedData sheet
meta = pd.read_excel(xlsx,sheet_name="UsedData")
zoom_factor = meta["zoom_factor"].iloc[0]#should images be zoomed before forwarding through neural net?
meta = pd.read_excel(xlsx,sheet_name="Parameters")
try:
model_type = meta["Chosen Model"].iloc[0]#input dimensions of the model
except:
model_type = "Unknown"
try:
target_imsize = meta["Input image crop"].iloc[0]#input dimensions of the model
except:
target_imsize = meta["Input image size"].iloc[0]#input dimensions of the model
normalization_method = meta["Normalization"].iloc[0]#normalization method
if normalization_method == "StdScaling using mean and std of all training data":
mean_trainingdata = meta["Mean of training data used for scaling"]
std_trainingdata = meta["Std of training data used for scaling"]
else:
mean_trainingdata = None
std_trainingdata = None
#Following parameters may not exist in meta files of older AID versions. Hence try/except
#Color mode: grayscale or RGB?
try:
target_channels = meta["Color Mode"].iloc[0]
except:
target_channels = "grayscale"
if target_channels.lower() =="grayscale":
target_channels = 1
elif target_channels.lower() =="rgb":
target_channels = 3
#The order for the zooming operation
try:
zoom_interpol_method = meta["Zoom order"].iloc[0]
except:
zoom_interpol_method = "cv2.INTER_NEAREST"
#Translate zoom_interpol_method to OpenCV argument
if "cv2." not in str(zoom_interpol_method):
zoom_interpol_method = zoom_arguments_scipy2cv(zoom_factor,zoom_interpol_method)
#Padding mode
try:
padding_mode = meta["paddingMode"].iloc[0]
except:
padding_mode = "constant"#cv2.BORDER_CONSTANT
#translate padding_mode to OpenCV argument
if "cv2." not in padding_mode:
padding_mode = pad_arguments_np2cv(padding_mode)
#Write information in one DataFrame
img_processing_settings = pd.DataFrame()
img_processing_settings["model_type"]=model_type,
img_processing_settings["target_imsize"]=target_imsize,
img_processing_settings["target_channels"]=target_channels,
img_processing_settings["normalization_method"]=normalization_method,
img_processing_settings["mean_trainingdata"]=mean_trainingdata,
img_processing_settings["std_trainingdata"]=std_trainingdata,
img_processing_settings["zoom_factor"]=zoom_factor,
img_processing_settings["zoom_interpol_method"]=zoom_interpol_method,
img_processing_settings["padding_mode"]=padding_mode,
return img_processing_settings
def forward_images_cv2(model_pb,img_processing_settings,images,pos_x,pos_y,pix):
"""
Run inference on images using a tensorflow model
Parameters
----------
model_pb: cv2.dnn_Net object; a frozen model graph that was loaded using cv2.dnn.readNet
img_processing_settings: pd.DataFrame; a DataFrame generated using load_model_meta()
images: numpy array of shape (nr.images,height,width) for grayscale images, or
of shape (nr.images,height,width,channels) for RGB images
pos_x: float or ndarray of length N
The x coordinate(s) of the centroid of the event(s) [um]
pos_y: float or ndarray of length N
The y coordinate(s) of the centroid of the event(s) [um]
pix: float
Resolution [µm/pix]
Returns
----------
output_pb: ndarray
    The output of the neural net for the provided images (typically one row
    of class scores per image)
"""
target_imsize = int(img_processing_settings["target_imsize"].values[0])
target_channels = int(img_processing_settings["target_channels"].values[0])
zoom_factor = float(img_processing_settings["zoom_factor"].values[0])
zoom_interpol_method = str(img_processing_settings["zoom_interpol_method"].values[0])
padding_mode = str(img_processing_settings["padding_mode"].values[0])
normalization_method = str(img_processing_settings["normalization_method"].values[0])
mean_trainingdata = img_processing_settings["mean_trainingdata"].values[0]
std_trainingdata = img_processing_settings["std_trainingdata"].values[0]
#Preprocess images
images = image_preprocessing(images,pos_x=pos_x,pos_y=pos_y,pix=pix,
target_imsize=target_imsize,
target_channels=target_channels,
zoom_factor=zoom_factor,
zoom_interpol_method=zoom_interpol_method,
padding_mode=padding_mode,
normalization_method=normalization_method,
mean_trainingdata=mean_trainingdata,
std_trainingdata=std_trainingdata)
#Create a blob from the preprocessed images and forward it through the model
blob = cv2.dnn.blobFromImages(images, 1, (target_imsize,target_imsize), swapRB=False, crop=False)
model_pb.setInput(blob)
output_pb = model_pb.forward()
return output_pb
def pad_arguments_np2cv(padding_mode):
"""
NumPy's pad and OpenCV's copyMakeBorder can do the same thing, but their
arguments are named differently.
This function takes a numpy padding_mode argument and returns the
corresponding borderType for cv2.copyMakeBorder.
Parameters
----------
padding_mode: str; numpy padding mode
- "constant" (default): Pads with a constant value.
- "edge": Pads with the edge values of array.
- "linear_ramp": Pads with the linear ramp between end_value and the array edge value.
- "maximum": Pads with the maximum value of all or part of the vector along each axis.
- "mean": Pads with the mean value of all or part of the vector along each axis.
- "median": Pads with the median value of all or part of the vector along each axis.
- "minimum": Pads with the minimum value of all or part of the vector along each axis.
- "reflect": Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis.
- "symmetric": Pads with the reflection of the vector mirrored along the edge of the array.
- "wrap": Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning.
Returns
----------
str: OpenCV borderType
- "cv2.BORDER_CONSTANT": iiiiii|abcdefgh|iiiiiii with some specified i
- "cv2.BORDER_REFLECT": fedcba|abcdefgh|hgfedcb
- "cv2.BORDER_REFLECT_101": gfedcb|abcdefgh|gfedcba
- "cv2.BORDER_DEFAULT": same as BORDER_REFLECT_101
- "cv2.BORDER_REPLICATE": aaaaaa|abcdefgh|hhhhhhh
- "cv2.BORDER_WRAP": cdefgh|abcdefgh|abcdefg
"""
#Check that the padding_mode is actually supported by OpenCV
supported = ["constant","edge","reflect","symmetric","wrap"]
assert padding_mode in supported, \
    "The padding mode: '" + padding_mode + "' is not supported"
if padding_mode=="constant":
return "cv2.BORDER_CONSTANT"
if padding_mode=="edge":
return "cv2.BORDER_REPLICATE"
if padding_mode=="reflect":
return "cv2.BORDER_REFLECT_101"
if padding_mode=="symmetric":
return "cv2.BORDER_REFLECT"
if padding_mode=="wrap":
return "cv2.BORDER_WRAP"
def zoom_arguments_scipy2cv(zoom_factor,zoom_interpol_method):
"""
Images resized with ndimage.zoom and cv2.resize are never exactly identical,
but with the mappings below you get at least similar results.
Parameters
----------
zoom_factor: float,
factor by which the size of the images should be zoomed
zoom_interpol_method: int or str,
    The order of the spline interpolation (scipy) or an OpenCV interpolation flag
Returns
----------
str; OpenCV interpolation flag
"""
opencv_zoom_options = ["cv2.INTER_NEAREST","cv2.INTER_LINEAR","cv2.INTER_AREA","cv2.INTER_CUBIC","cv2.INTER_LANCZOS4"]
if type(zoom_interpol_method)==str:
if zoom_interpol_method in opencv_zoom_options:
return zoom_interpol_method
if zoom_factor>=0.8:
if zoom_interpol_method==0: return "cv2.INTER_NEAREST"
elif zoom_interpol_method==1: return "cv2.INTER_LINEAR"
elif zoom_interpol_method==2: return "cv2.INTER_CUBIC"
elif zoom_interpol_method==3: return "cv2.INTER_LANCZOS4"
elif zoom_interpol_method==4: return "cv2.INTER_LANCZOS4"
elif zoom_interpol_method==5: return "cv2.INTER_LANCZOS4"
if zoom_factor<0.8: #for downsampling the image, all methods perform similar
#but cv2.INTER_LINEAR, is closest most of the time, irrespective of the zoom_order
return "cv2.INTER_LINEAR"
|
122c219a48353718e97c7371927b82be1a8a5f74
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/python/tvm/rpc/server.py
|
6ee683c73ba56b3ee1e8c4af4a232959a18e1f1a
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 17,956
|
py
|
server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
import os
import ctypes
import socket
import select
import struct
import logging
import threading
import multiprocessing
import time
import errno
import tvm._ffi
from tvm._ffi.base import py_str
from tvm._ffi.libinfo import find_lib_path
from tvm.runtime.module import load_module as _load_module
from tvm.contrib import utils
from tvm.contrib.popen_pool import PopenWorker
from . import _ffi_api
from . import base
# pylint: disable=unused-import
from . import testing
from .base import TrackerCode
logger = logging.getLogger("RPCServer")
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
)
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
logger.propagate = False
def _server_env(load_library, work_path=None):
"""Server environment function return temp dir"""
if work_path:
temp = work_path
else:
temp = utils.tempdir()
# pylint: disable=unused-variable
@tvm._ffi.register_func("tvm.rpc.server.workpath", override=True)
def get_workpath(path):
return temp.relpath(path)
@tvm._ffi.register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
m = _load_module(path)
logger.info("load_module %s", path)
return m
@tvm._ffi.register_func("tvm.rpc.server.download_linked_module", override=True)
def download_linked_module(file_name):
"""Load module from remote side."""
# pylint: disable=import-outside-toplevel
path = temp.relpath(file_name)
if path.endswith(".o"):
# Extra dependencies during runtime.
from tvm.contrib import cc as _cc
_cc.create_shared(path + ".so", path)
path += ".so"
elif path.endswith(".tar"):
# Extra dependencies during runtime.
from tvm.contrib import cc as _cc, tar as _tar
tar_temp = utils.tempdir(custom_path=path.replace(".tar", ""))
_tar.untar(path, tar_temp.temp_dir)
files = [tar_temp.relpath(x) for x in tar_temp.listdir()]
_cc.create_shared(path + ".so", files)
path += ".so"
elif path.endswith(".dylib") or path.endswith(".so"):
pass
else:
raise RuntimeError(f"Do not know how to link {file_name}")
logger.info("Send linked module %s to client", path)
return bytearray(open(path, "rb").read())
libs = []
load_library = load_library.split(":") if load_library else []
for file_name in load_library:
file_name = find_lib_path(file_name)[0]
libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
logger.info("Load additional library %s", file_name)
temp.libs = libs
return temp
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
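# Illustrative sketch (not part of the original module): _parse_server_opt
# only recognizes the "-timeout=<seconds>" client option and returns it as a
# float, i.e. {"timeout": 30.0} for the input below.
def _example_parse_server_opt():
    return _parse_server_opt(["-timeout=30"])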
def _serving(sock, addr, opts, load_library):
logger.info(f"connected from {addr}")
work_path = utils.tempdir()
old_cwd = os.getcwd()
os.chdir(work_path.path) # Avoiding file name conflict between sessions.
logger.info(f"start serving at {work_path.path}")
def _serve_loop():
_server_env(load_library, work_path)
_ffi_api.ServerLoop(sock.fileno())
server_proc = multiprocessing.Process(target=_serve_loop)
server_proc.start()
server_proc.join(opts.get("timeout", None)) # Wait until finish or timeout.
if server_proc.is_alive():
logger.info("timeout in RPC session, kill..")
_ffi_api.ReturnException(
sock.fileno(),
f'RPCSessionTimeoutError: Your {opts["timeout"]}s session has expired, '
f'try to increase the "session_timeout" value.',
)
try:
import psutil # pylint: disable=import-outside-toplevel
# Terminate worker children firstly.
for child in psutil.Process(server_proc.pid).children(recursive=True):
child.terminate()
except ImportError:
# Don't make `psutil` a hard dependency, because it isn't a pure Python
# package and may be hard to install on some platforms.
pass
server_proc.terminate()
logger.info(f"finish serving {addr}")
os.chdir(old_cwd)
work_path.remove()
sock.close()
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
"""Listening loop of the server."""
def _accept_conn(listen_sock, tracker_conn, ping_period=2):
"""Accept connection from the other places.
Parameters
----------
listen_sock: Socket
The socket used by listening process.
tracker_conn : connection to tracker
Tracker connection
ping_period : float, optional
ping the tracker every `ping_period` seconds if no connection is accepted.
"""
old_keyset = set()
# Report resource to tracker
if tracker_conn:
matchkey = base.random_key(rpc_key)
base.sendjson(tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
else:
matchkey = rpc_key
unmatch_period_count = 0
unmatch_timeout = 4
# Wait until we get a valid connection
while True:
if tracker_conn:
trigger = select.select([listen_sock], [], [], ping_period)
if listen_sock not in trigger[0]:
base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = base.recvjson(tracker_conn)
old_keyset.add(matchkey)
# if match key not in pending key set
# it means the key is acquired by a client but not used.
if matchkey not in pending_keys:
unmatch_period_count += 1
else:
unmatch_period_count = 0
# regenerate match key if key is acquired but not used for a while
if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
logger.info("no incoming connections, regenerate key ...")
matchkey = base.random_key(rpc_key, cmap=old_keyset)
base.sendjson(
tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr]
)
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
unmatch_period_count = 0
continue
conn, addr = listen_sock.accept()
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:" + matchkey
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
conn.close()
logger.warning("mismatch key from %s", addr)
continue
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
return conn, addr, _parse_server_opt(arr[1:])
# Server logic
tracker_conn = None
while True:
try:
# step 1: setup tracker and report to tracker
if tracker_addr and tracker_conn is None:
tracker_conn = base.connect_with_retry(tracker_addr)
tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
raise RuntimeError(f"{str(tracker_addr)} is not RPC Tracker")
# report status of current queue
cinfo = {"key": "server:" + rpc_key, "addr": (custom_addr, port)}
base.sendjson(tracker_conn, [TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
# step 2: wait for in-coming connections
conn, addr, opts = _accept_conn(sock, tracker_conn)
except (socket.error, IOError):
# retry when tracker is dropped
if tracker_conn:
tracker_conn.close()
tracker_conn = None
continue
except RuntimeError as exc:
raise exc
# step 3: serving
_serving(conn, addr, opts, load_library)
def _connect_proxy_loop(addr, key, load_library):
key = "server:" + key
retry_count = 0
max_retry = 5
retry_period = 5
while True:
try:
sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("<i", base.RPC_MAGIC))
sock.sendall(struct.pack("<i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError(f"key: {key} has already been used in proxy")
if magic == base.RPC_CODE_MISMATCH:
logger.warning("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError(f"{str(addr)} is not RPC Proxy")
keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
remote_key = py_str(base.recvall(sock, keylen))
_serving(sock, addr, _parse_server_opt(remote_key.split()[1:]), load_library)
retry_count = 0
except (socket.error, IOError) as err:
retry_count += 1
logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
if retry_count > max_retry:
raise RuntimeError(f"Maximum retry error: last error: {str(err)}")
time.sleep(retry_period)
class PopenRPCServerState(object):
"""Internal PopenRPCServer State"""
current = None
def __init__(
self,
host,
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
reuse_addr=True,
timeout=None,
):
# start update
self.host = host
self.port = port
self.libs = []
self.custom_addr = custom_addr
if silent:
logger.setLevel(logging.ERROR)
if not is_proxy:
sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
if reuse_addr:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if timeout is not None:
sock.settimeout(timeout)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [errno.EADDRINUSE]:
continue
raise sock_err
if not self.port:
raise ValueError(f"cannot bind to any port in [{port}, {port_end})")
logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.thread = threading.Thread(
target=_listen_loop,
args=(self.sock, self.port, key, tracker_addr, load_library, self.custom_addr),
)
self.thread.start()
else:
self.thread = threading.Thread(
target=_connect_proxy_loop, args=((host, port), key, load_library)
)
self.thread.start()
def _popen_start_rpc_server(
host,
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
no_fork=False,
server_init_callback=None,
reuse_addr=True,
timeout=None,
):
if no_fork:
multiprocessing.set_start_method("spawn")
if server_init_callback:
server_init_callback()
# This is a function that will be sent to the
# Popen worker to run on a separate process.
# Create and start the server in a different thread
state = PopenRPCServerState(
host,
port,
port_end,
is_proxy,
tracker_addr,
key,
load_library,
custom_addr,
silent,
reuse_addr,
timeout,
)
PopenRPCServerState.current = state
# returns the port so that the main can get the port number.
return state.port
class Server(object):
"""Start RPC server on a separate process.
This is a simple python implementation based on multi-processing.
It is also possible to implement a similar C-based server with the
TVM runtime which does not depend on Python.
Parameters
----------
host : str
The host url of the server.
port : int
The port to be bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
If this is true, the host and port actually correspond to the
address of the proxy server.
tracker_addr: Tuple (str, int) , optional
The address of the RPC Tracker in tuple(host, port) format.
If is not None, the server will register itself to the tracker.
key : str, optional
The key used to identify the device type in tracker.
load_library : str, optional
List of additional libraries to be loaded during execution.
custom_addr: str, optional
Custom IP Address to Report to RPC Tracker
silent: bool, optional
Whether run this server in silent mode.
no_fork: bool, optional
Whether forbid fork in multiprocessing.
server_init_callback: Callable, optional
Additional initialization function when starting the server.
reuse_addr: bool, optional
Allows the kernel to reuse a local socket in TIME_WAIT state.
timeout: float, optional
set a timeout for all operations on the socket
Note
----
The RPC server only sees functions in the tvm namespace.
To bring additional custom functions to the server env, you can use server_init_callback.
.. code:: python
def server_init_callback():
import tvm
# must import mypackage here
import mypackage
tvm.register_func("function", mypackage.func)
server = rpc.Server(host, server_init_callback=server_init_callback)
"""
def __init__(
self,
host="0.0.0.0",
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
no_fork=False,
server_init_callback=None,
reuse_addr=True,
timeout=None,
):
try:
if _ffi_api.ServerLoop is None:
raise RuntimeError("Please compile with USE_RPC=1")
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
self.proc = PopenWorker()
# send the function
self.proc.send(
_popen_start_rpc_server,
[
host,
port,
port_end,
is_proxy,
tracker_addr,
key,
load_library,
custom_addr,
silent,
no_fork,
server_init_callback,
reuse_addr,
timeout,
],
)
# receive the port
self.port = self.proc.recv()
self.host = host
def terminate(self):
"""Terminate the server process"""
if self.proc:
self.proc.kill()
self.proc = None
def __del__(self):
try:
self.terminate()
except ImportError:
pass
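# Illustrative sketch (not part of the original module): starting a local RPC
# server bound to 127.0.0.1 and shutting it down again. This assumes TVM was
# built with USE_RPC=1; in practice a client would connect with
# tvm.rpc.connect(host, server.port) before terminate() is called.
def _example_local_server():
    server = Server(host="127.0.0.1", port=9091, port_end=9199, key="example")
    try:
        return server.port  # the port the server actually bound to
    finally:
        server.terminate()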
|
a1f4afd67ad77e451c0e3288e1898186fd812356
|
2dc24a356ebe7a362623780603379a5b35a65c2f
|
/terraform/stacks/bot/lambdas/python/slack_automation_bot/slack_bolt/kwargs_injection/args.py
|
0774b183a541b70318a798b5d28a2121fb19b801
|
[
"MIT"
] |
permissive
|
cloud-sniper/cloud-sniper
|
cef08402f9109211c33909bdb3de07b16952e308
|
4b026da33695b25033c7667679f3cf552c4bf3b5
|
refs/heads/master
| 2023-06-24T20:46:02.377409
| 2023-04-14T14:48:45
| 2023-04-14T14:48:45
| 210,739,453
| 184
| 36
|
MIT
| 2023-04-14T14:48:46
| 2019-09-25T02:34:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,683
|
py
|
args.py
|
# pytype: skip-file
import logging
from logging import Logger
from typing import Callable, Dict, Any, Optional
from slack_bolt.context import BoltContext
from slack_bolt.context.ack import Ack
from slack_bolt.context.respond import Respond
from slack_bolt.context.say import Say
from slack_bolt.request import BoltRequest
from slack_bolt.response import BoltResponse
from slack_sdk import WebClient
class Args:
client: WebClient
logger: Logger
req: BoltRequest
resp: BoltResponse
request: BoltRequest
response: BoltResponse
context: BoltContext
body: Dict[str, Any]
# payload
payload: Dict[str, Any]
options: Optional[Dict[str, Any]] # payload alias
shortcut: Optional[Dict[str, Any]] # payload alias
action: Optional[Dict[str, Any]] # payload alias
view: Optional[Dict[str, Any]] # payload alias
command: Optional[Dict[str, Any]] # payload alias
event: Optional[Dict[str, Any]] # payload alias
message: Optional[Dict[str, Any]] # payload alias
# utilities
ack: Ack
say: Say
respond: Respond
# middleware
next: Callable[[], None]
def __init__(
self,
*,
logger: logging.Logger,
client: WebClient,
req: BoltRequest,
resp: BoltResponse,
context: BoltContext,
body: Dict[str, Any],
payload: Dict[str, Any],
options: Optional[Dict[str, Any]] = None,
shortcut: Optional[Dict[str, Any]] = None,
action: Optional[Dict[str, Any]] = None,
view: Optional[Dict[str, Any]] = None,
command: Optional[Dict[str, Any]] = None,
event: Optional[Dict[str, Any]] = None,
message: Optional[Dict[str, Any]] = None,
ack: Ack,
say: Say,
respond: Respond,
next: Callable[[], None],
**kwargs # noqa
):
self.logger: logging.Logger = logger
self.client: WebClient = client
self.request = self.req = req
self.response = self.resp = resp
self.context: BoltContext = context
self.body: Dict[str, Any] = body
self.payload: Dict[str, Any] = payload
self.options: Optional[Dict[str, Any]] = options
self.shortcut: Optional[Dict[str, Any]] = shortcut
self.action: Optional[Dict[str, Any]] = action
self.view: Optional[Dict[str, Any]] = view
self.command: Optional[Dict[str, Any]] = command
self.event: Optional[Dict[str, Any]] = event
self.message: Optional[Dict[str, Any]] = message
self.ack: Ack = ack
self.say: Say = say
self.respond: Respond = respond
self.next: Callable[[], None] = next
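# Illustrative sketch (not part of the original module): a listener body that
# receives the fully populated Args object. In slack_bolt, listeners may take
# a single `args` parameter instead of individual keyword arguments; the
# behaviour shown here is only a usage sketch.
def _example_listener(args: Args) -> None:
    args.logger.info("payload received: %s", args.payload)
    args.ack()                      # acknowledge the incoming request
    args.say("Handled by the bot")  # post a message to the channel in context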
|
b91c7035bb3f5a6466973fb2697eb08b69d96953
|
dbe83cf6c2b78a61def862ca19625c2f78268af8
|
/ibis/expr/operations/window.py
|
b6af75af47699a13a1794042cc74c99afcc808ce
|
[
"Apache-2.0"
] |
permissive
|
ibis-project/ibis
|
56a169d75805db7dfd39192cf0562521c405ff1c
|
3866492906d731dc170b560e7d7471bd4855169a
|
refs/heads/master
| 2023-09-01T17:07:38.854510
| 2023-09-01T13:52:08
| 2023-09-01T15:32:04
| 34,139,230
| 2,304
| 384
|
Apache-2.0
| 2023-09-14T21:52:21
| 2015-04-17T20:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,609
|
py
|
window.py
|
from __future__ import annotations
from abc import abstractmethod
from typing import Optional
from public import public
from typing_extensions import TypeVar
import ibis.common.exceptions as com
import ibis.expr.datashape as ds
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
from ibis.common.patterns import CoercionError
from ibis.common.typing import VarTuple # noqa: TCH001
from ibis.expr.operations.analytic import Analytic
from ibis.expr.operations.core import Column, Value
from ibis.expr.operations.generic import Literal
from ibis.expr.operations.numeric import Negate
from ibis.expr.operations.reductions import Reduction
from ibis.expr.operations.relations import Relation # noqa: TCH001
from ibis.expr.operations.sortkeys import SortKey # noqa: TCH001
T = TypeVar("T", bound=dt.Numeric | dt.Interval, covariant=True)
S = TypeVar("S", bound=ds.DataShape, default=ds.Any, covariant=True)
@public
class WindowBoundary(Value[T, S]):
# TODO(kszucs): consider to prefer Concrete base class here
# pretty similar to SortKey and Alias operations which wrap a single value
value: Value[T, S]
preceding: bool
@property
def following(self) -> bool:
return not self.preceding
@property
def shape(self) -> S:
return self.value.shape
@property
def dtype(self) -> T:
return self.value.dtype
@classmethod
def __coerce__(cls, value, **kwargs):
arg = super().__coerce__(value, **kwargs)
if isinstance(arg, cls):
return arg
elif isinstance(arg, Negate):
return cls(arg.arg, preceding=True)
elif isinstance(arg, Literal):
new = arg.copy(value=abs(arg.value))
return cls(new, preceding=arg.value < 0)
elif isinstance(arg, Value):
return cls(arg, preceding=False)
else:
raise CoercionError(f"Invalid window boundary type: {type(arg)}")
@public
class WindowFrame(Value):
"""A window frame operation bound to a table."""
table: Relation
group_by: VarTuple[Column] = ()
order_by: VarTuple[SortKey] = ()
shape = ds.columnar
def __init__(self, start, end, **kwargs):
if start and end and start.dtype != end.dtype:
raise com.IbisTypeError(
"Window frame start and end boundaries must have the same datatype"
)
super().__init__(start=start, end=end, **kwargs)
def dtype(self) -> dt.DataType:
return dt.Array(dt.Struct.from_tuples(self.table.schema.items()))
@property
@abstractmethod
def start(self):
...
@property
@abstractmethod
def end(self):
...
@public
class RowsWindowFrame(WindowFrame):
how = "rows"
start: Optional[WindowBoundary[dt.Integer]] = None
end: Optional[WindowBoundary] = None
max_lookback: Optional[Value[dt.Interval]] = None
def __init__(self, max_lookback, order_by, **kwargs):
if max_lookback:
# TODO(kszucs): this should belong to a timeseries extension rather than
# the core window operation
if len(order_by) != 1:
raise com.IbisTypeError(
"`max_lookback` window must be ordered by a single column"
)
if not order_by[0].dtype.is_timestamp():
raise com.IbisTypeError(
"`max_lookback` window must be ordered by a timestamp column"
)
super().__init__(max_lookback=max_lookback, order_by=order_by, **kwargs)
@public
class RangeWindowFrame(WindowFrame):
how = "range"
start: Optional[WindowBoundary[dt.Numeric | dt.Interval]] = None
end: Optional[WindowBoundary[dt.Numeric | dt.Interval]] = None
@public
class WindowFunction(Value):
func: Value
frame: WindowFrame
dtype = rlz.dtype_like("func")
shape = ds.columnar
def __init__(self, func, frame):
from ibis.expr.analysis import propagate_down_window, shares_all_roots
if not func.find((Reduction, Analytic)):
raise com.IbisTypeError("Window function expression must be analytic")
func = propagate_down_window(func, frame)
if not shares_all_roots(func, frame):
raise com.RelationError(
"Window function expressions doesn't fully originate from the "
"dependencies of the window expression."
)
super().__init__(func=func, frame=frame)
@property
def name(self):
return self.func.name
public(WindowOp=WindowFunction, Window=WindowFunction)
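# Illustrative sketch (not part of the original module): the operations above
# are normally constructed indirectly through the user-facing expression API.
# The schema and window specification below are assumptions for illustration.
def _example_window_expression():
    import ibis  # imported lazily to avoid import cycles inside the ibis package
    t = ibis.table([("g", "string"), ("x", "int64")], name="t")
    w = ibis.window(group_by=t.g, order_by=t.x, preceding=2, following=0)
    return t.mutate(rolling_sum=t.x.sum().over(w))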
|
725ad184b03d819fe324c64ba94cd85b508f8c28
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/main/refbooks/downloaders/loader.py
|
cc5881ad8ab5c3ac46b55fdda76f6178da07fcc9
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 638
|
py
|
loader.py
|
# ----------------------------------------------------------------------
# Downloader Loader
# ----------------------------------------------------------------------
# Copyright (C) 2007-2022 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.loader.base import BaseLoader
from .base import BaseDownloader
class DownloaderLoader(BaseLoader):
name = "downloader"
ignored_names = {"base", "loader"}
base_cls = BaseDownloader
base_path = ("main", "refbooks", "downloaders")
# Create singleton object
loader = DownloaderLoader()
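# Illustrative sketch (not part of the original module): the singleton above
# is what the rest of NOC imports; the attributes checked here are only the
# ones declared on DownloaderLoader itself.
def _example_loader_attrs():
    assert loader.name == "downloader"
    assert loader.base_cls is BaseDownloader
    return loader.base_path  # ("main", "refbooks", "downloaders")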
|
42f526c0fb165ae1899c07602d2e733ed1e36380
|
bdf0d4d3aac186af3ad0ad6ac9f380f9a0573fba
|
/aries_cloudagent/protocols/revocation_notification/v2_0/models/rev_notification_record.py
|
abbd3e04aaecda8531a965a742407eed87c8b416
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
hyperledger/aries-cloudagent-python
|
f25d961e0717a4d703bf43df3e4b4bc8ec07b908
|
39cac36d8937ce84a9307ce100aaefb8bc05ec04
|
refs/heads/main
| 2023-09-01T15:37:05.353674
| 2023-08-31T14:13:06
| 2023-08-31T14:13:06
| 193,556,007
| 370
| 530
|
Apache-2.0
| 2023-09-14T17:59:34
| 2019-06-24T18:12:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,689
|
py
|
rev_notification_record.py
|
"""Store revocation notification details until revocation is published."""
from typing import Optional, Sequence
from marshmallow import fields
from marshmallow.utils import EXCLUDE
from .....core.profile import ProfileSession
from .....messaging.models.base_record import BaseRecord, BaseRecordSchema
from .....messaging.valid import (
INDY_CRED_REV_ID_EXAMPLE,
INDY_CRED_REV_ID_VALIDATE,
INDY_REV_REG_ID_EXAMPLE,
INDY_REV_REG_ID_VALIDATE,
UUID4_EXAMPLE,
UUID4_VALIDATE,
)
from .....storage.error import StorageDuplicateError, StorageNotFoundError
from ..messages.revoke import Revoke
class RevNotificationRecord(BaseRecord):
"""Revocation Notification Record."""
class Meta:
"""RevNotificationRecord Meta."""
schema_class = "RevNotificationRecordSchema"
RECORD_TYPE = "revocation_notification"
RECORD_ID_NAME = "revocation_notification_id"
TAG_NAMES = {
"rev_reg_id",
"cred_rev_id",
"connection_id",
"version",
}
def __init__(
self,
*,
revocation_notification_id: str = None,
rev_reg_id: str = None,
cred_rev_id: str = None,
connection_id: str = None,
thread_id: str = None,
comment: str = None,
version: str = None,
**kwargs,
):
"""Construct record."""
super().__init__(revocation_notification_id, **kwargs)
self.rev_reg_id = rev_reg_id
self.cred_rev_id = cred_rev_id
self.connection_id = connection_id
self.thread_id = thread_id
self.comment = comment
self.version = version
@property
def revocation_notification_id(self) -> Optional[str]:
"""Return record id."""
return self._id
@property
def record_value(self) -> dict:
"""Return record value."""
return {prop: getattr(self, prop) for prop in ("thread_id", "comment")}
@classmethod
async def query_by_ids(
cls,
session: ProfileSession,
cred_rev_id: str,
rev_reg_id: str,
) -> "RevNotificationRecord":
"""Retrieve revocation notification record by cred rev id and/or rev reg id.
Args:
session: the profile session to use
cred_rev_id: the cred rev id by which to filter
rev_reg_id: the rev reg id by which to filter
"""
tag_filter = {
**{"version": "v2_0"},
**{"cred_rev_id": cred_rev_id for _ in [""] if cred_rev_id},
**{"rev_reg_id": rev_reg_id for _ in [""] if rev_reg_id},
}
result = await cls.query(session, tag_filter)
if len(result) > 1:
raise StorageDuplicateError(
"More than one RevNotificationRecord was found for the given IDs"
)
if not result:
raise StorageNotFoundError(
"No RevNotificationRecord found for the given IDs"
)
return result[0]
@classmethod
async def query_by_rev_reg_id(
cls,
session: ProfileSession,
rev_reg_id: str,
) -> Sequence["RevNotificationRecord"]:
"""Retrieve revocation notification records by rev reg id.
Args:
session: the profile session to use
rev_reg_id: the rev reg id by which to filter
"""
tag_filter = {
**{"version": "v2_0"},
**{"rev_reg_id": rev_reg_id for _ in [""] if rev_reg_id},
}
return await cls.query(session, tag_filter)
def to_message(self):
"""Return a revocation notification constructed from this record."""
if not self.thread_id:
raise ValueError(
"No thread ID set on revocation notification record, "
"cannot create message"
)
return Revoke(
revocation_format="indy-anoncreds",
credential_id=f"{self.rev_reg_id}::{self.cred_rev_id}",
comment=self.comment,
)
class RevNotificationRecordSchema(BaseRecordSchema):
"""Revocation Notification Record Schema."""
class Meta:
"""RevNotificationRecordSchema Meta."""
model_class = "RevNotificationRecord"
unknown = EXCLUDE
rev_reg_id = fields.Str(
required=False,
validate=INDY_REV_REG_ID_VALIDATE,
metadata={
"description": "Revocation registry identifier",
"example": INDY_REV_REG_ID_EXAMPLE,
},
)
cred_rev_id = fields.Str(
required=False,
validate=INDY_CRED_REV_ID_VALIDATE,
metadata={
"description": "Credential revocation identifier",
"example": INDY_CRED_REV_ID_EXAMPLE,
},
)
connection_id = fields.Str(
required=False,
validate=UUID4_VALIDATE,
metadata={
"description": (
"Connection ID to which the revocation notification will be sent;"
" required if notify is true"
),
"example": UUID4_EXAMPLE,
},
)
thread_id = fields.Str(
required=False,
metadata={
"description": (
"Thread ID of the credential exchange message thread resulting in the"
" credential now being revoked; required if notify is true"
)
},
)
comment = fields.Str(
required=False,
metadata={
"description": "Optional comment to include in revocation notification"
},
)
version = fields.Str(
required=False,
metadata={"description": "Version of Revocation Notification to send out"},
)
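# Illustrative sketch (not part of the original module): how a record built with
# this file's own constructor turns into a Revoke message; the id values below
# are hypothetical placeholders.
#
#     record = RevNotificationRecord(
#         rev_reg_id="<rev-reg-id>",
#         cred_rev_id="1",
#         connection_id="<conn-id>",
#         thread_id="<thread-id>",
#         comment="Credential revoked",
#         version="v2_0",
#     )
#     msg = record.to_message()   # Revoke with credential_id "<rev-reg-id>::1"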
|
bd30937d3bbbfd12dcc0a448d19ac1624ab5ed12
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/A_Primer_on_Scientific_Programming_with_Python/class/classes.py
|
bf71c7d3e9ad7414e81b87d1ee5ea0bb1483c8fb
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,947
|
py
|
classes.py
|
from math import pi
class Y:
def __init__(self, v0):
self.v0 = v0
self.g = 9.81
def value(self, t):
return self.v0*t - 0.5*self.g*t**2
def formula(self):
return 'v0*t - 0.5*g*t**2; v0=%g' % self.v0
def __call__(self, t):
return self.v0*t - 0.5*self.g*t**2
def __str__(self):
return 'v0*t - 0.5*g*t**2; v0=%g' % self.v0
class Y2:
def value(self, t, v0=None):
if v0 is not None:
self.v0 = v0
g = 9.81
return self.v0*t - 0.5*g*t**2
def value(self, t, v0=None):
if v0 is not None:
self.v0 = v0
if not hasattr(self, 'v0'):
            print('You cannot call value(t) without first '
                  'calling value(t,v0) to set v0')
return None
g = 9.81
return self.v0*t - 0.5*g*t**2
def value(self, t, v0=None):
if v0 is not None:
self.v0 = v0
g = 9.81
try:
value = self.v0*t - 0.5*g*t**2
except AttributeError:
msg = 'You cannot call value(t) without first '\
'calling value(t,v0) to set v0'
raise TypeError(msg)
return value
class VelocityProfile:
def __init__(self, beta, mu0, n, R):
self.beta, self.mu0, self.n, self.R = beta, mu0, n, R
def value(self, r):
beta, mu0, n, R = self.beta, self.mu0, self.n, self.R
n = float(n) # ensure float divisions
v = (beta/(2.0*mu0))**(1/n)*(n/(n+1))*\
(R**(1+1/n) - r**(1+1/n))
return v
class Account:
def __init__(self, name, account_number, initial_amount):
self.name = name
self.no = account_number
self.balance = initial_amount
def deposit(self, amount):
self.balance += amount
def withdraw(self, amount):
self.balance -= amount
def dump(self):
s = '%s, %s, balance: %s' % \
(self.name, self.no, self.balance)
        print(s)
class AccountP:
def __init__(self, name, account_number, initial_amount):
self._name = name
self._no = account_number
self._balance = initial_amount
def deposit(self, amount):
self._balance += amount
def withdraw(self, amount):
self._balance -= amount
def get_balance(self):
return self._balance
def dump(self):
s = '%s, %s, balance: %s' % \
(self._name, self._no, self._balance)
        print(s)
class Person:
def __init__(self, name,
mobile_phone=None, office_phone=None,
private_phone=None, email=None):
self.name = name
self.mobile = mobile_phone
self.office = office_phone
self.private = private_phone
self.email = email
def add_mobile_phone(self, number):
self.mobile = number
def add_office_phone(self, number):
self.office = number
def add_private_phone(self, number):
self.private = number
def add_email(self, address):
self.email = address
def dump(self):
s = self.name + '\n'
if self.mobile is not None:
s += 'mobile phone: %s\n' % self.mobile
if self.office is not None:
s += 'office phone: %s\n' % self.office
if self.private is not None:
s += 'private phone: %s\n' % self.private
if self.email is not None:
s += 'email address: %s\n' % self.email
        print(s)
class Circle:
def __init__(self, x0, y0, R):
self.x0, self.y0, self.R = x0, y0, R
def area(self):
return pi*self.R**2
def circumference(self):
return 2*pi*self.R
class Derivative:
def __init__(self, f, h=1E-5):
self.f = f
self.h = float(h)
def __call__(self, x):
f, h = self.f, self.h # make short forms
return (f(x+h) - f(x))/h
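# Illustrative demo (a sketch appended to the original listing): exercises a few
# of the classes defined above when the module is run directly.
if __name__ == '__main__':
    y = Y(v0=5)
    print(y.value(0.1))          # height at t = 0.1 s
    acc = Account('John Doe', '12345', 100)
    acc.deposit(50)
    acc.withdraw(20)
    acc.dump()                   # John Doe, 12345, balance: 130
    ddx = Derivative(lambda x: x**2)
    print(ddx(2))                # approximately 4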
|
bd29ae7ec04f6fbfb742e1b972c4596264a2095c
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/fm/models/utils.py
|
f1f28b3d6c947aff94cd4da7962daa92055c298f
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
utils.py
|
# ---------------------------------------------------------------------
# FM models utils
# ---------------------------------------------------------------------
# Copyright (C) 2007-2023 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from typing import Union
# Third-party modules
from bson import ObjectId
import orjson
# NOC modules
from noc.core.clickhouse.connect import connection
def get_event(event_id):
"""
Get event by event_id
"""
sql = f"""select
e.event_id as id,
e.ts as timestamp,
e.event_class as event_class_bi_id,
e.managed_object as managed_object_bi_id,
e.start_ts as start_timestamp,
e.source, e.raw_vars, e.resolved_vars, e.vars,
d.alarms as alarms
from events e
LEFT OUTER JOIN (
SELECT event_id, groupArray(alarm_id) as alarms FROM disposelog where alarm_id != '' GROUP BY event_id) as d
ON e.event_id == d.event_id
where event_id='{event_id}'
format JSON
"""
cursor = connection()
res = orjson.loads(cursor.execute(sql, return_raw=True))
if res:
return ActiveEvent.create_from_dict(res["data"][0])
return None
def get_alarm(alarm_id) -> Union["ActiveAlarm", "ArchivedAlarm"]:
"""
Get alarm by alarm_id
"""
for ac in (ActiveAlarm, ArchivedAlarm):
a = ac.objects.filter(id=alarm_id).first()
if a:
return a
return None
def get_severity(alarms):
"""
Return severity CSS class name for the alarms
:param alarms: Single instance or list of alarms
"""
def f(a):
if hasattr(a, "id"):
return a.id
elif isinstance(a, str):
return ObjectId(a)
else:
return a
severity = 0
if not isinstance(alarms, list):
alarms = [alarms]
al = [f(x) for x in alarms]
for ac in (ActiveAlarm, ArchivedAlarm):
if len(al) == 1:
q = {"_id": al[0]}
else:
q = {"_id": {"$in": al}}
for d in ac._get_collection().find(q, {"severity": 1}):
severity = max(severity, d["severity"])
al.remove(d["_id"])
if not al:
break
return severity
# NOC modules
from .activeevent import ActiveEvent
from .activealarm import ActiveAlarm
from .archivedalarm import ArchivedAlarm
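# Illustrative sketch (not part of the original module): get_severity() accepts
# a single alarm instance, a string id, an ObjectId, or a mixed list of them;
# the id below is a hypothetical placeholder.
#
#     severity = get_severity("64d2f0c2a1b2c3d4e5f60718")
#     severity = get_severity([alarm, "64d2f0c2a1b2c3d4e5f60718"])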
|
be08309f862254bf79284a1bea7a327bc9221f00
|
733d89e47c26ea1c69a7b7b511d72c0c0bed2682
|
/rplugin/qc-fast.py
|
4e90197e0c6f4f81b1eb4df73e56094cf780c952
|
[] |
no_license
|
raghur/fruzzy
|
67283868b4f26bef35d70769e629369f7225e02c
|
4cdfee7b828a5cace22bfd93cf23fee0b2b233c4
|
refs/heads/master
| 2021-06-02T19:24:04.184825
| 2020-08-31T03:51:03
| 2020-08-31T03:51:03
| 150,564,410
| 115
| 8
| null | 2020-10-11T03:53:53
| 2018-09-27T09:44:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,091
|
py
|
qc-fast.py
|
from python3 import fruzzy
import sys
import os
useNative = False
if os.getenv("FUZZY_CMOD"):
from python3.fruzzy_mod import scoreMatchesStr, baseline
useNative = True
def printResults(query, results):
print()
print("query: %s, results: " % query)
for r in results:
print(r)
def scoreMatches(q, c, limit, ispath):
if useNative:
idxArr = scoreMatchesStr(q, c, "", limit, ispath)
results = []
for i in idxArr:
results.append((c[i[0]],i[1]))
return results
else:
return fruzzy.scoreMatches(q, c, "", limit, ispath=ispath)
check = True
lines = []
def run():
results = scoreMatches("api", lines, 10, True)
printResults("api", results)
if check:
assert results[0][0].endswith("api.pb.go")
results = scoreMatches("rct", lines, 10, True)
printResults("rct", results)
if check:
assert results[0][0].endswith("root_cmd_test.go")
results = scoreMatches("fuzz", lines, 10, True)
printResults("fuzz", results)
if check:
assert results[0][0].endswith("pyfuzzy.py")
assert results[1][0].endswith("gofuzzy.py")
results = scoreMatches("ME", lines, 10, True)
printResults("ME", results)
if check:
assert results[0][0].endswith("README.md")
results = scoreMatches("cli", lines, 10, True)
printResults("cli", results)
if check:
assert results[0][0].endswith("cli.go")
assert results[1][0].endswith("client.go")
results = scoreMatches("testn", lines, 10, True)
printResults("testn", results)
if check:
assert results[0][0].endswith("test_main.py")
def main():
global check
global lines
file = "neomru_file"
if len(sys.argv) > 1:
check = False
file = sys.argv[1]
with open(file) as fh:
lines = [line.strip() for line in fh.readlines()]
print("Loaded %d lines from file: %s. Asserts are %s" % (len(lines), file,
check))
run()
if __name__ == "__main__":
main()
|
6589927af75e8624e3a658e9c757090445fa256d
|
f192190519b078fb4d3c27beee4f2fa7228f6f2f
|
/octoprint_PrintTimeGenius/analyzers/analyze_progress.py
|
e5791ae7b84f4e08fb8109269f8c5f2ace99b3b5
|
[] |
no_license
|
eyal0/OctoPrint-PrintTimeGenius
|
31b71b5f30565badca345e475d03976fb83c5955
|
b164e6016d003dfd55e2b40d43d20078301e4c94
|
refs/heads/master
| 2023-07-23T03:25:12.213031
| 2023-07-21T21:30:56
| 2023-07-21T21:30:56
| 139,004,783
| 191
| 44
| null | 2023-07-21T21:27:11
| 2018-06-28T10:42:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,294
|
py
|
analyze_progress.py
|
#!/usr/bin/env python
from __future__ import print_function
import subprocess
import sys
import json
import os
import platform
def main():
binary_base_name = sys.argv[1]
machine = platform.machine()
if platform.system() == "Darwin":
machine = "darwin-" + machine
elif platform.system() == "Windows":
machine = "windows-" + machine + ".exe"
gcode = sys.argv[2]
mcodes = None
if len(sys.argv) > 3:
mcodes = sys.argv[3]
cmd = [
os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
"{}.{}".format(binary_base_name, machine)),
gcode]
if mcodes:
cmd += [mcodes]
print("Running: {}".format(" ".join('"{}"'.format(c) for c in cmd)), file=sys.stderr)
if not os.path.isfile(cmd[0]):
print("Can't find: {}".format(cmd[0]), file=sys.stderr)
sys.exit(2)
if not os.access(cmd[0], os.X_OK):
print("Not executable: {}".format(cmd[0]), file=sys.stderr)
sys.exit(3)
try:
output = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
progress = []
result = {}
first_filament = None
last_filament = None
max_filament = None
most_recent_progress = float("-inf")
last_row = None
for line in output.stdout:
if not line:
continue
if line.startswith(b"Progress:"):
line = line[len("Progress:"):]
(filepos, filament, time) = map(float, line.split(b","))
if filament > 0 and not first_filament:
first_filament = filepos
if not max_filament or filament > max_filament:
last_filament = filepos
max_filament = filament
last_filament_row = [filepos, time]
if filepos == first_filament or most_recent_progress+60 < time:
most_recent_progress = time
progress.append([filepos, time])
last_row = None
else:
last_row = [filepos, time]
elif line.startswith(b"Analysis:"):
line = line[len("Analysis:"):]
result.update(json.loads(line))
if last_row:
progress.append(last_row)
result["firstFilament"] = first_filament
result["lastFilament"] = last_filament
total_time = progress[-1][1]
result["progress"] = [[0, total_time]]
for progress_entry in progress:
if last_filament_row and progress_entry[0] > last_filament_row[0]:
# Squeeze this row into the right spot.
result["progress"].append([last_filament_row[0],
total_time-last_filament_row[1]])
last_filament_row = None
if not last_filament_row or progress_entry[0] != last_filament_row[0]:
if result["progress"][-1][0] == progress_entry[0]:
# Overwrite instead of append.
result["progress"][-1] = ([progress_entry[0],
total_time-progress_entry[1]])
else:
result["progress"].append(
[progress_entry[0],
total_time-progress_entry[1]])
if last_filament_row:
    # We didn't get to add it earlier, so add it now.
result["progress"].append([last_filament_row[0],
total_time-last_filament_row[1]])
result["progress"].append([1, 0])
result["estimatedPrintTime"] = total_time
print(json.dumps(result))
sys.exit(output.wait())
if __name__ == "__main__":
main()
|
94475dddbe898b5c015e6be008f1f662e4ad6ec6
|
76787cd4117d71377bd27d251b6d30b41922ff67
|
/draft_kings/data.py
|
bc4d512a9725658406709eb0c5e9aca272830824
|
[
"MIT"
] |
permissive
|
jaebradley/draftkings_client
|
50ba0f25e38b78f75d92a57660bfb110e3a27e69
|
2184e2e3cf66bfe9e4cc6f6d577c80602ab7121a
|
refs/heads/v3
| 2022-12-09T14:35:50.263181
| 2022-01-19T06:36:24
| 2022-01-19T06:36:24
| 73,451,976
| 138
| 47
|
MIT
| 2022-12-08T01:23:13
| 2016-11-11T06:29:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
data.py
|
from enum import Enum
class Sport(Enum):
ARENA_FOOTBALL_LEAGUE = "ARENA FOOTBALL LEAGUE"
AUSTRALIAN_FOOTBALL_LEAGUE = "AUSTRALIAN FOOTBALL LEAGUE"
NFL = "NFL"
NHL = "NHL"
NBA = "NBA"
NASCAR = "NASCAR"
SOCCER = "SOCCER"
GOLF = "GOLF"
CALL_OF_DUTY = "CALL OF DUTY"
CFL = "CFL"
COLLEGE_FOOTBALL = "COLLEGE FOOTBALL"
COLLEGE_BASKETBALL = "COLLEGE BASKETBALL"
COUNTER_STRIKE_GLOBAL_OFFENSIVE = "COUNTER STRIKE: GLOBAL OFFENSIVE"
MIXED_MARTIAL_ARTS = "MIXED MARTIAL ARTS"
EUROLEAGUE_BASKETBALL = "EUROLEAGUE BASKETBALL"
IRACING = "IRACING"
MLB = "MLB"
TENNIS = "TENNIS"
LEAGUE_OF_LEGENDS = "LEAGUE OF LEGENDS"
ROCKET_LEAGUE = "ROCKET LEAGUE"
XFL = "XFL"
# https://api.draftkings.com/sites/US-DK/sports/v1/sports?format=json
SPORT_ID_TO_SPORT = {
1: Sport.NFL,
2: Sport.MLB,
3: Sport.NHL,
4: Sport.NBA,
6: Sport.COLLEGE_BASKETBALL,
5: Sport.COLLEGE_FOOTBALL,
9: Sport.MIXED_MARTIAL_ARTS,
10: Sport.NASCAR,
11: Sport.LEAGUE_OF_LEGENDS,
12: Sport.SOCCER,
13: Sport.GOLF,
14: Sport.CFL,
15: Sport.EUROLEAGUE_BASKETBALL,
16: Sport.TENNIS,
17: Sport.ARENA_FOOTBALL_LEAGUE,
18: Sport.XFL,
19: Sport.COUNTER_STRIKE_GLOBAL_OFFENSIVE,
20: Sport.ROCKET_LEAGUE,
22: Sport.CALL_OF_DUTY,
23: Sport.AUSTRALIAN_FOOTBALL_LEAGUE,
24: Sport.IRACING,
}
SPORT_TO_CONTESTS_ABBREVIATION = {
Sport.NFL: "NFL",
Sport.NHL: "NHL",
Sport.NBA: "NBA",
Sport.CFL: "CFL",
Sport.COLLEGE_FOOTBALL: "CFB",
Sport.MIXED_MARTIAL_ARTS: "MMA",
Sport.NASCAR: "NAS",
Sport.SOCCER: "SOC",
Sport.EUROLEAGUE_BASKETBALL: "EL",
Sport.MLB: "MLB",
Sport.TENNIS: "TEN",
Sport.LEAGUE_OF_LEGENDS: "LOL",
Sport.GOLF: "GOLF",
Sport.COLLEGE_BASKETBALL: "CBB",
Sport.XFL: "XFL",
Sport.COUNTER_STRIKE_GLOBAL_OFFENSIVE: "CS:GO",
Sport.ROCKET_LEAGUE: "RL",
Sport.CALL_OF_DUTY: "COD",
Sport.IRACING: "IRACE"
}
CONTEST_SPORT_ABBREVIATIONS_TO_SPORTS = {v: k for k, v in SPORT_TO_CONTESTS_ABBREVIATION.items()}
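# Illustrative lookups (a sketch, not part of the original module), using only
# the mappings defined above:
#
#     SPORT_ID_TO_SPORT[4]                               # Sport.NBA
#     SPORT_TO_CONTESTS_ABBREVIATION[Sport.NBA]          # "NBA"
#     CONTEST_SPORT_ABBREVIATIONS_TO_SPORTS["CS:GO"]     # Sport.COUNTER_STRIKE_GLOBAL_OFFENSIVE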
|
7332796c073926f13207a0229c7ffd8a5d316d81
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/ecco.py
|
8ee98e820250ace4ee56f3713d9a46d6c1bfc071
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,713
|
py
|
ecco.py
|
import scrapy
from locations.hours import DAYS_FULL, OpeningHours
from locations.items import Feature
class EccoSpider(scrapy.Spider):
name = "ecco"
item_attributes = {"brand": "Ecco", "brand_wikidata": "Q1280255"}
start_urls = [
"https://se.ecco.com/api/store/search?latitudeMin=-90&longitudeMin=-180&latitudeMax=90&longitudeMax=180"
]
custom_settings = {"ROBOTSTXT_OBEY": False}
def parse(self, response):
for store in response.json():
if store["t"] in [
# 0, # PARTNER # Just sell the stock?
1, # ECCO
2, # Outlet
]:
yield scrapy.Request(
url="https://se.ecco.com/api/store/finder/" + store["i"], callback=self.parse_store
)
def parse_store(self, response):
store = response.json()
if store["StoreType"] == "FullPrice":
return # Online/virtual store
item = Feature()
item["ref"] = store["StoreId"]
item["name"] = store["Name"]
item["street_address"] = store["Street"]
item["housenumber"] = store["HouseNr"]
item["city"] = store["City"]
item["postcode"] = store["PostalCode"]
item["phone"] = store["Phone"]
item["country"] = store["CountryCode"]
item["email"] = store["Email"]
item["lat"] = store["Latitude"]
item["lon"] = store["Longitude"]
item["extras"]["store_type"] = store["StoreType"]
oh = OpeningHours()
for day in DAYS_FULL:
oh.add_range(day, store[f"{day}Open"], store[f"{day}Close"], time_format="%H:%M:%S")
item["opening_hours"] = oh
yield item
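    # Illustrative sketch (not part of the original spider): the hours loop
    # above amounts to one call of this shape per full day name, shown here
    # with hypothetical opening and closing times.
    #
    #     oh.add_range("Monday", "10:00:00", "19:00:00", time_format="%H:%M:%S")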
|
681f77fd01bf23c9bb0bdfac02037dbc1deaf1ab
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/seq2seq/src/seq2seq_model/bleu_calculate.py
|
1bddff545bae919486a245bc5f5eb0623a59907f
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,830
|
py
|
bleu_calculate.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Calculate the blue scores"""
import subprocess
import numpy as np
from src.dataset.tokenizer import Tokenizer
def load_result_data(result_npy_addr):
# load the numpy to list.
result = np.load(result_npy_addr, allow_pickle=True)
return result
def get_bleu_data(tokenizer: Tokenizer, result_npy_addr):
"""
Detokenizer the prediction.
Args:
tokenizer (Tokenizer): tokenizer operations.
result_npy_addr (string): Path to the predict file.
Returns:
List, the predict text context.
"""
result = load_result_data(result_npy_addr)
prediction_list = []
for _, info in enumerate(result):
# prediction detokenize
prediction = info["prediction"]
prediction_str = tokenizer.detokenize(prediction)
prediction_list.append(prediction_str)
return prediction_list
def calculate_sacrebleu(predict_path, target_path):
"""
Calculate the BLEU scores.
Args:
predict_path (string): Path to the predict file.
target_path (string): Path to the target file.
Returns:
Float32, bleu scores.
"""
sacrebleu_params = '--score-only -lc --tokenize intl'
sacrebleu = subprocess.run([f'sacrebleu {target_path} --input {predict_path} {sacrebleu_params}'],
stdout=subprocess.PIPE, shell=True)
bleu_scores = round(float(sacrebleu.stdout.strip()), 2)
return bleu_scores
def bleu_calculate(tokenizer, result_npy_addr, target_addr=None):
"""
Calculate the BLEU scores.
Args:
tokenizer (Tokenizer): tokenizer operations.
result_npy_addr (string): Path to the predict file.
target_addr (string): Path to the target file.
Returns:
Float32, bleu scores.
"""
prediction = get_bleu_data(tokenizer, result_npy_addr)
print("predict:\n", prediction)
eval_path = './predict.txt'
with open(eval_path, 'w') as eval_file:
lines = [line + '\n' for line in prediction]
eval_file.writelines(lines)
reference_path = target_addr
bleu_scores = calculate_sacrebleu(eval_path, reference_path)
return bleu_scores
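# Illustrative sketch (not part of the original module): end-to-end scoring of a
# saved prediction file. The Tokenizer construction and the file paths below are
# hypothetical placeholders.
#
#     tokenizer = Tokenizer(...)            # built as elsewhere in this project
#     score = bleu_calculate(tokenizer, "eval_output.npy", "newstest2014.de")
#     print("BLEU:", score)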
|
0b2533bc138e5d74170fb13f103c74b06ca7d78c
|
5f7335fb5240590eaebf751ec2c37ffbfb92d08b
|
/test_scripts/module_with_triple_quotes/greeting.py
|
ca94bf178572948b6877db4fe0b1057a666de7ae
|
[
"BSD-2-Clause"
] |
permissive
|
mwilliamson/stickytape
|
41523ff4650384634660ebe164a2e637319ed4cb
|
7e8d4dc2ace78d4f99e4c7a0f49273abf341b187
|
refs/heads/master
| 2023-05-10T23:12:21.253570
| 2023-05-01T15:04:37
| 2023-05-01T15:04:37
| 6,472,845
| 209
| 29
|
BSD-2-Clause
| 2022-10-05T20:45:32
| 2012-10-31T11:27:37
|
Python
|
UTF-8
|
Python
| false
| false
| 33
|
py
|
greeting.py
|
message = "Hello\n'''\n" + '"""'
|
e4128a61b889e8978a8c46017c9e7b94b62fd4a6
|
f513818b0b9db1e345de3e390dad71e692987c9a
|
/python/bllipparser/RerankerFeatureCorpus.py
|
1f2f8758f87f48e0a9ff3fe46d19b8d1cf221f1f
|
[
"Apache-2.0"
] |
permissive
|
BLLIP/bllip-parser
|
ee20570eae2b31e316ca46e5157de921432ba1a0
|
0eb1ed848a7be0f222e23d451d786a6a0ac24e08
|
refs/heads/master
| 2021-11-24T03:06:42.479290
| 2021-11-07T19:34:41
| 2021-11-07T19:34:41
| 1,418,858
| 209
| 58
| null | 2017-09-30T20:29:27
| 2011-02-27T19:29:34
|
GAP
|
UTF-8
|
Python
| false
| false
| 13,944
|
py
|
RerankerFeatureCorpus.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
cvlm corpus read/transform support.
Data structures and utilities to read the format that cvlm (reranker
optimizer) takes as input (sparse feature values associated with each
candidate).
This needs the waterworks utility library.
(https://github.com/dmcc/waterworks)
Example:
>>> corpus = RerankerFeatureCorpus('path/to/filename.gz')
>>> for sentence in corpus:
... print('index', sentence.index, 'num parses', len(sentence.parses))
... print('num parse0 features', len(sentence.parses[0].features))
"""
from collections import defaultdict
from waterworks.Strings import try_parse_float, try_parse_int
from waterworks.Files import possibly_compressed_file
from waterworks.Tools import initialize, generic_repr
import PrecRec
from AIMA import argmax_list
def parse_kv_list(text):
"""Parse cvlm key-value pairs from text. Returns a default dictionary
(missing features will have value 0.0)."""
pieces = text.split()
results = defaultdict(float)
for piece in pieces:
if '=' in piece:
key, value = piece.split('=')
if value.endswith(','):
value = value[:-1]
value = try_parse_float(value, value)
else:
key = piece
value = 1
if key.endswith(','):
key = key[:-1]
key = try_parse_int(key, key)
results[key] = value
return results
def generate_kv_list(features):
"""Render cvlm key-value pairs to text from a dictionary."""
pieces = []
for k, v in sorted(features.items()):
if v == 1:
pieces.append(str(k))
else:
pieces.append('%s=%s' % (k, v))
return ' '.join(pieces)
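# Illustrative round trip for the two helpers above (a sketch, not part of the
# original module):
#
#     feats = parse_kv_list('G=12 N=50 7=1.5, 9')
#     # feats['G'] == 12.0, feats['N'] == 50.0, feats[7] == 1.5, feats[9] == 1
#     generate_kv_list({7: 1.5, 9: 1})      # -> '7=1.5 9'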
class FeatureMapping(dict):
"""Subclass of dictionary with IO for handling cvlm feature mappings
and weights. The mapping is stored as
{ feature index : feature name/weight }"""
def write(self, filename):
f = possibly_compressed_file(filename, 'w')
for index in range(len(self)):
name = self[index]
f.write('%d\t%s\n' % (index, name))
f.close()
@classmethod
def weights_from_filename(this_class, filename):
"""Reads cvlm weight vectors from a filename. The expected format
is that each line has an index followed by an equals sign followed
by the feature weight (a float). Returns a FeatureMapping."""
weights = this_class()
for line in possibly_compressed_file(filename):
index, weight = line.split('=')
index = int(index)
weight = float(weight)
weights[index] = weight
return weights
@classmethod
def mapping_from_filename(this_class, filename):
"""Reads cvlm feature mapping from a filename. The expected
format is that each line has an index followed by a tab followed
by the feature name. Returns a FeatureMapping."""
mapping = this_class()
for line in possibly_compressed_file(filename):
index, name = line.split('\t')
index = int(index)
mapping[index] = name.strip()
return mapping
class RerankerParse:
"""A single parse of a RerankerSentence. Each parse includes
the number of proposed and matched brackets (which, combined with
gold_brackets will tell you its f-score) and a sparse feature vector
(dictionary of features to values)."""
def __init__(self, proposed_brackets, matched_brackets, features):
features = defaultdict(int, features)
initialize(self, locals())
__repr__ = generic_repr
def nonzero_features(self):
"""Returns a set of feature names with non-zero values."""
return set(feature for (feature, value) in self.features.items()
if value != 0)
def subtract_features(self, other_parse):
"""Shifts all feature values by those in another RerankerParse.
As a result, shifting a RerankerParse by itself will set all of
its features to zero."""
if self == other_parse:
self.features.clear()
else:
for index, value in other_parse.features.items():
self.features[index] -= value
def map_and_prune(self, mapping):
"""Remaps the feature names according to a mapping:
{ old feature name : new feature name }
If the new feature name is None, the feature will be pruned. The
mapping must completely cover the features in this parse (that
is, each feature name for this parse must have an entry in the
mapping). This is to help ensure that your mapping is compatible
with features on this parse."""
mapped_features = {}
for old_name, value in self.features.items():
new_name = mapping[old_name]
if new_name is not None and value != 0:
mapped_features[new_name] = value
self.features = mapped_features
def score(self, weights):
"""Score this parse using a weight vector (dictionary of feature
names to weights)."""
total_score = 0
for feature, value in self.features.items():
weight = weights[feature]
total_score += weight * value
return total_score
def cvlm_format(self):
"""Render this parse in cvlm's sparse feature vector format."""
meta = 'P=%s W=%s' % (self.proposed_brackets, self.matched_brackets)
if self.features:
return '%s %s' % (meta, generate_kv_list(self.features))
else:
return meta
class RerankerSentence:
"""A single sentence for input to cvlm. Each sentence includes the
number of gold brackets, its index in the corpus, and a list of
all candidate parses with their features and evaluation information
(RerankerParse objects).
Each RerankerParse has "winner" parses (parses with the highest
f-score in the sentence -- there can be ties) and "loser" parses
(all other parses)."""
def __init__(self, gold_brackets, parses, index):
initialize(self, locals())
__repr__ = generic_repr
def __iter__(self):
"""Iterate over all the RerankerParse objects in this sentence."""
return iter(self.parses)
def __len__(self):
"""Returns the number of RerankerParse objects in this sentence."""
return len(self.parses)
def __getitem__(self, index):
"""Retrieves a RerankerParse object in this sentence by its index."""
return self.parses[index]
def relativize_feature_values(self, relative_index=0):
"""Make features in all the RerankerParse objects relative by
shifting all feature values by those in the top parse (you can
pick a different parse by setting the relative_index flag). This
will set all features in the top parse to zero and potentially
simplify the features of the remaining parses (assuming their
features look mostly like those in the top parse -- in some cases,
this can increase the number of non-zero features)."""
if not self.parses:
return
rel_parse = self.parses[relative_index]
for parse in self.parses:
if parse == rel_parse:
continue
parse.subtract_features(rel_parse)
rel_parse.subtract_features(rel_parse)
def fscore_components(self, parse):
"""Returns the f-score components (matched, gold, proposed)
of a RerankerParse in this sentence."""
return (parse.matched_brackets, self.gold_brackets,
parse.proposed_brackets)
def fscore(self, parse):
"""Returns the f-score of a RerankerParse in this sentence."""
components = self.fscore_components(parse)
return PrecRec.fscore_from_components(*components)
def winner_parses(self):
"""Returns a list of the "winner" parses."""
return argmax_list(self.parses, self.fscore)
def oracle_fscore(self):
"""Returns the highest f-score from one of the candidate parses
(all winner parses will have this f-score)."""
return max(self.fscore(parse) for parse in self.parses)
def distinguishing_feature_counts(self):
"""Returns a dictionary mapping feature indices to the number
of times they distinguished a "winner" from "loser" parses."""
feature_counts = defaultdict(int)
winners = self.winner_parses()
all_features = set()
for parse in self:
all_features.update(parse.features.keys())
if len(winners) == len(self.parses):
# no losers means nothing is distinguishing.
# to build a complete mapping, we still store the counts of the
# features as 0s.
for feature in all_features:
feature_counts[feature] = 0
return feature_counts
losers = [parse for parse in self.parses if parse not in winners]
all_winner_features = set()
nonzero_features = set()
# find all values of features for any winner parses
for winner in winners:
all_winner_features.update(winner.features.items())
nonzero_features.update(winner.nonzero_features())
# now find features of any loser parse with a different value
# (unless they only show up with 0 as their value -- these
# aren't really distinguishing, but could appear as such if they're
# unspecified in the winner parses and explicitly set to 0 in the
# lower parses)
all_loser_features = set()
for loser in losers:
all_loser_features.update(loser.features.items())
nonzero_features.update(loser.nonzero_features())
diffs = all_loser_features.symmetric_difference(all_winner_features)
distinguishing_features = set(feature for (feature, value) in diffs)
for feature in all_features:
if feature in distinguishing_features and \
feature in nonzero_features:
feature_counts[feature] += 1
else:
feature_counts[feature] = 0
return feature_counts
def map_and_prune(self, mapping):
"""Applies RerankerParse.map_and_prune() to every parse in
this sentence."""
for parse in self.parses:
parse.map_and_prune(mapping)
def rerank(self, weights):
"""Score all parses and sort them by their scores using a weight
vector (dictionary of feature names to weights). Note that this
modifies the list of parses in place."""
self.parses.sort(key=lambda parse: parse.score(weights),
reverse=True)
def cvlm_format(self):
"""Render this sentence in cvlm's sparse feature vector format."""
return 'G=%s N=%s %s,' % (self.gold_brackets, len(self.parses),
', '.join(parse.cvlm_format()
for parse in self.parses))
@classmethod
def from_string(this_class, text, index):
parses_text = text.split(', ')
gold_brackets = None
parses = []
for parse_index, parse_text in enumerate(parses_text):
features = parse_kv_list(parse_text)
if parse_index == 0:
gold_brackets = features.pop('G')
features.pop('N')
proposed_brackets = features.pop('P')
matched_brackets = features.pop('W')
parses.append(RerankerParse(proposed_brackets, matched_brackets,
features))
assert gold_brackets is not None
return this_class(gold_brackets, parses, index)
class RerankerFeatureCorpus:
"""Made up of a series of sentences. Because these files are huge
and the Python wrappers around these structures cannot typically be
stored in memory, this only lets you iterate over the corpus. Note
that if you're generating a new reranker input file for cvlm,
you'll need to write the result from cvlm_format_header() followed
by the cvlm_format() for each sentence in the corpus. The number
of sentences in the RerankerFeatureCorpus corpus is available as
its length."""
def __init__(self, filename):
initialize(self, locals())
self.reader = iter(possibly_compressed_file(filename))
self.header = parse_kv_list(next(self.reader))
assert 'S' in self.header
self.num_sentences = self.header['S']
__repr__ = generic_repr
def cvlm_format_header(self):
"""Return the header in cvlm format."""
return 'S=%d\n' % self.num_sentences
def __len__(self):
"""Returns the number of sentences in this corpus."""
return self.num_sentences
def __iter__(self):
"""Returns an iterator over each sentence in the corpus."""
for i, line in enumerate(self.reader):
sentence = RerankerSentence.from_string(line, i)
sentence.header = self.header
yield sentence
def transform(self, transformer):
"""Iterate over every sentence in this corpus, applying a
transformation function to each. The transformer will be called
on each RerankerSentence instance in order."""
for sentence in self:
yield transformer(sentence)
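# Illustrative transform pass (a sketch, not part of the original module):
# relativize each sentence's feature values and re-emit the corpus in cvlm
# format; 'features.gz' and 'relativized.gz' are hypothetical paths.
#
#     corpus = RerankerFeatureCorpus('features.gz')
#     out = possibly_compressed_file('relativized.gz', 'w')
#     out.write(corpus.cvlm_format_header())
#     def relativize(sentence):
#         sentence.relativize_feature_values()
#         return sentence.cvlm_format() + '\n'
#     for line in corpus.transform(relativize):
#         out.write(line)
#     out.close()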
|
4f3b064950bfd0b010f05ea11239eeb1313c05d5
|
5eb52c07e5b1bd00af77306f927f382b684cd6ff
|
/indy_node/test/persistence/test_idr_cache_update_after_catchup.py
|
9926c13a16431b4e585aa33ca65dcb5e0eb31dfc
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-node
|
bce39486988f5114581cff4f6d14fc1b7684143c
|
e6bb87d4c605aff9914491d062248b6ec857334c
|
refs/heads/main
| 2023-09-03T15:33:08.187153
| 2023-05-08T22:48:21
| 2023-05-08T22:48:21
| 77,021,566
| 691
| 783
|
Apache-2.0
| 2023-05-09T15:42:43
| 2016-12-21T05:45:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
test_idr_cache_update_after_catchup.py
|
import json
from indy.ledger import build_nym_request, sign_request, submit_request
from indy_common.state import domain
from indy_node.test.helper import start_stopped_node, createHalfKeyIdentifierAndAbbrevVerkey
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.txn_util import get_txn_time
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected
def test_idr_cache_update_after_catchup(txnPoolNodeSet,
looper,
sdk_pool_handle,
sdk_wallet_steward,
tconf,
tdir,
allPluginsPath):
wallet_handle, identifier = sdk_wallet_steward
node_to_disconnect = txnPoolNodeSet[-1]
disconnect_node_and_ensure_disconnected(looper,
txnPoolNodeSet,
node_to_disconnect.name,
stopNode=True)
looper.removeProdable(node_to_disconnect)
idr, verkey = createHalfKeyIdentifierAndAbbrevVerkey()
request = looper.loop.run_until_complete(build_nym_request(identifier, idr, verkey, None, None))
req_signed = looper.loop.run_until_complete(sign_request(wallet_handle, identifier, request))
result = json.loads(looper.loop.run_until_complete(submit_request(sdk_pool_handle, req_signed)))
restarted_node = start_stopped_node(node_to_disconnect, looper,
tconf, tdir, allPluginsPath)
txnPoolNodeSet[-1] = restarted_node
waitNodeDataEquality(looper, restarted_node, *txnPoolNodeSet[:-1])
root_hash = restarted_node.db_manager.ts_store.get_equal_or_prev(get_txn_time(result['result']))
key = domain.make_state_path_for_nym(idr)
from_state = restarted_node.getState(DOMAIN_LEDGER_ID).get_for_root_hash(root_hash=root_hash,
key=key)
assert from_state
deserialized = restarted_node.write_manager.state_serializer.deserialize(from_state)
assert deserialized
items_after = restarted_node.db_manager.idr_cache.get(idr)
assert items_after
|
10c9478ac458608f417d69b783997ffa553b8788
|
146c65138cf8665918193c74a185ceab0ba7009b
|
/tests/testing/units.py
|
90918aea55bbacc028653f4732ff48d1cf1a76ea
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
harvard-acc/gem5-aladdin
|
b962dd2103e938354c05d81e6843c145ac05e085
|
d4efbee56d71f9609eab85393eff58f5dbf7763c
|
refs/heads/master
| 2022-11-11T12:25:29.032584
| 2022-01-21T06:26:00
| 2022-01-21T06:26:00
| 70,526,994
| 194
| 66
|
BSD-3-Clause
| 2022-10-06T04:50:52
| 2016-10-10T20:41:00
|
C++
|
UTF-8
|
Python
| false
| false
| 10,268
|
py
|
units.py
|
#!/usr/bin/env python2.7
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from abc import ABCMeta, abstractmethod
from datetime import datetime
import difflib
import functools
import os
import re
import subprocess
import sys
import traceback
from results import UnitResult
from helpers import *
_test_base = os.path.join(os.path.dirname(__file__), "..")
class TestUnit(object):
"""Base class for all test units.
A test unit is a part of a larger test case. Test cases usually
contain two types of units, run units (run gem5) and verify units
(diff output files). All unit implementations inherit from this
class.
A unit implementation overrides the _run() method. The test runner
calls the run() method, which wraps _run() to protect against
exceptions.
"""
__metaclass__ = ABCMeta
def __init__(self, name, ref_dir, test_dir, skip=False):
self.name = name
self.ref_dir = ref_dir
self.test_dir = test_dir
self.force_skip = skip
self.start_time = None
self.stop_time = None
def result(self, state, **kwargs):
if self.start_time is not None and "runtime" not in kwargs:
self.stop_time = datetime.utcnow()
delta = self.stop_time - self.start_time
kwargs["runtime"] = delta.total_seconds()
return UnitResult(self.name, state, **kwargs)
def ok(self, **kwargs):
return self.result(UnitResult.STATE_OK, **kwargs)
def skip(self, **kwargs):
return self.result(UnitResult.STATE_SKIPPED, **kwargs)
def error(self, message, **kwargs):
return self.result(UnitResult.STATE_ERROR, message=message, **kwargs)
def failure(self, message, **kwargs):
return self.result(UnitResult.STATE_FAILURE, message=message, **kwargs)
def ref_file(self, fname):
return os.path.join(self.ref_dir, fname)
def out_file(self, fname):
return os.path.join(self.test_dir, fname)
def _read_output(self, fname, default=""):
try:
with open(self.out_file(fname), "r") as f:
return f.read()
except IOError:
return default
def run(self):
self.start_time = datetime.utcnow()
try:
if self.force_skip:
return self.skip()
else:
return self._run()
except:
return self.error("Python exception:\n%s" % traceback.format_exc())
@abstractmethod
def _run(self):
pass
class RunGem5(TestUnit):
"""Test unit representing a gem5 run.
Possible failure modes:
- gem5 failed to run -> STATE_ERROR
- timeout -> STATE_ERROR
- non-zero exit code -> STATE_ERROR
Possible non-failure results:
- exit code == 0 -> STATE_OK
- exit code == 2 -> STATE_SKIPPED
"""
def __init__(self, gem5, gem5_args, timeout=0, **kwargs):
super(RunGem5, self).__init__("gem5", **kwargs)
self.gem5 = gem5
self.args = gem5_args
self.timeout = timeout
def _run(self):
gem5_cmd = [
self.gem5,
"-d", self.test_dir,
"--stats-file", "text://stats.txt?desc=False",
"-re",
] + self.args
try:
with ProcessHelper(gem5_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
status, gem5_stdout, gem5_stderr = p.call(timeout=self.timeout)
except CallTimeoutException as te:
return self.error("Timeout", stdout=te.stdout, stderr=te.stderr)
except OSError as ose:
return self.error("Failed to launch gem5: %s" % ose)
stderr = "\n".join([
"*** gem5 stderr ***",
gem5_stderr,
"",
"*** m5out/simerr ***",
self._read_output("simerr"),
])
stdout = "\n".join([
"*** gem5 stdout ***",
gem5_stdout,
"",
"*** m5out/simout ***",
self._read_output("simout"),
])
# Signal
if status < 0:
return self.error("gem5 terminated by signal %i" % (-status, ),
stdout=stdout, stderr=stderr)
elif status == 2:
return self.skip(stdout=stdout, stderr=stderr)
elif status > 0:
return self.error("gem5 exited with non-zero status: %i" % status,
stdout=stdout, stderr=stderr)
else:
return self.ok(stdout=stdout, stderr=stderr)
class DiffOutFile(TestUnit):
"""Test unit comparing and output file and a reference file."""
# regular expressions of lines to ignore when diffing outputs
diff_ignore_regexes = {
"simout" : [
re.compile('^Redirecting (stdout|stderr) to'),
re.compile('^gem5 compiled '),
re.compile('^gem5 started '),
re.compile('^gem5 executing on '),
re.compile('^command line:'),
re.compile("^Couldn't import dot_parser,"),
re.compile("^info: kernel located at:"),
re.compile("^Couldn't unlink "),
re.compile("^Using GPU kernel code file\(s\) "),
],
"simerr" : [
#re.compile('^Simulation complete at'),
],
"config.ini" : [
re.compile("^(executable|readfile|kernel|image_file)="),
re.compile("^(cwd|input|codefile)="),
],
"config.json" : [
re.compile(r'''^\s*"(executable|readfile|kernel|image_file)":'''),
re.compile(r'''^\s*"(cwd|input|codefile)":'''),
],
}
def __init__(self, fname, **kwargs):
super(DiffOutFile, self).__init__("diff[%s]" % fname,
**kwargs)
self.fname = fname
self.line_filters = DiffOutFile.diff_ignore_regexes.get(fname, tuple())
def _filter_file(self, fname):
def match_line(l):
for r in self.line_filters:
if r.match(l):
return True
return False
with open(fname, "r") as f:
for l in f:
if not match_line(l):
yield l
def _run(self):
fname = self.fname
ref = self.ref_file(fname)
out = self.out_file(fname)
if not os.path.exists(ref):
return self.error("%s doesn't exist in reference directory" \
% fname)
if not os.path.exists(out):
return self.error("%s doesn't exist in output directory" % fname)
diff = difflib.unified_diff(
tuple(self._filter_file(ref)),
tuple(self._filter_file(out)),
fromfile="ref/%s" % fname, tofile="out/%s" % fname)
diff = list(diff)
if diff:
return self.error("ref/%s and out/%s differ" % (fname, fname),
stderr="".join(diff))
else:
return self.ok(stdout="-- ref/%s and out/%s are identical --" \
% (fname, fname))
class DiffStatFile(TestUnit):
"""Test unit comparing two gem5 stat files."""
def __init__(self, **kwargs):
super(DiffStatFile, self).__init__("stat_diff", **kwargs)
self.stat_diff = os.path.join(_test_base, "diff-out")
def _run(self):
STATUS_OK = 0
STATUS_NEW_STATS = 1
STATUS_FAILED = 2
stats = "stats.txt"
cmd = [
self.stat_diff,
self.ref_file(stats), self.out_file(stats),
]
with ProcessHelper(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
status, stdout, stderr = p.call()
if status in (STATUS_OK, STATUS_NEW_STATS):
return self.ok(stdout=stdout, stderr=stderr)
elif status == STATUS_FAILED:
return self.failure("Statistics mismatch",
stdout=stdout, stderr=stderr)
else:
return self.error("diff-out returned an error: %i" % status,
stdout=stdout, stderr=stderr)
|
521d9cb3f7349a65e0c23b2ae2129bdc852bea55
|
c5fd80ede07f0972a9b99d0c65a0df40e6d487fa
|
/test/unit/test_mockcore.py
|
9c2549da0024e7a94d386a64f2db88d0e2e69035
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pyocd/pyOCD
|
46330f3a10c9be381293d220cc025e0e347513ce
|
9253740baf46ebf4eacbce6bf3369150c5fb8ee0
|
refs/heads/main
| 2023-08-18T07:56:54.205305
| 2023-08-13T19:11:01
| 2023-08-13T19:11:01
| 13,862,423
| 507
| 204
|
Apache-2.0
| 2023-09-09T20:13:57
| 2013-10-25T14:10:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,809
|
py
|
test_mockcore.py
|
# pyOCD debugger
# Copyright (c) 2016-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import logging
from .mockcore import MockCore
from pyocd.core import memory_map
from pyocd.coresight.cortex_m_core_registers import index_for_reg
from pyocd.utility import conversion
from pyocd.utility import mask
# @pytest.fixture(scope='function')
# def mockcore():
# return MockCore()
# Basic tests of MockCore memory simulation.
class TestMockCoreMem:
def test_read8_flash(self, mockcore):
assert mockcore.read_memory_block8(0, 4) == [0xff, 0xff, 0xff, 0xff]
def test_read8_ram(self, mockcore):
assert mockcore.read_memory_block8(0x20000000, 4) == [0, 0, 0, 0]
def test_read32_flash(self, mockcore):
assert mockcore.read_memory_block32(0, 1) == [0xffffffff]
def test_read32_ram(self, mockcore):
assert mockcore.read_memory_block32(0x20000000, 1) == [0x00000000]
def test_write8_flash(self, mockcore):
mockcore.write_memory_block8(0x100, [0xaa, 0xbb, 0xcc, 0xdd])
assert mockcore.read_memory_block8(0xfe, 8) == [0xff, 0xff, 0xaa, 0xbb, 0xcc, 0xdd, 0xff, 0xff]
def test_write32_flash(self, mockcore):
mockcore.write_memory_block32(0x100, [0xaabbccdd])
assert mockcore.read_memory_block32(0xfc, 3) == [0xffffffff, 0xaabbccdd, 0xffffffff]
def test_write8_ram(self, mockcore):
mockcore.write_memory_block8(0x20000100, [0xaa, 0xbb, 0xcc, 0xdd])
assert mockcore.read_memory_block8(0x200000fe, 8) == [0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x00, 0x00]
def test_write32_ram(self, mockcore):
mockcore.write_memory_block32(0x20000100, [0xaabbccdd])
assert mockcore.read_memory_block32(0x200000fc, 3) == [0x00000000, 0xaabbccdd, 0x00000000]
# Basic tests of MockCore register simulation.
class TestMockCoreReg:
def test_rw_r0_r15(self, mockcore):
for r in range(0, 16):
mockcore.write_core_registers_raw([r], [1+r])
for r in range(0, 16):
assert mockcore.read_core_registers_raw([r]) == [1+r]
def test_rw_cfbp(self, mockcore):
mockcore.write_core_registers_raw([index_for_reg('cfbp')], [0x01020304])
assert mockcore.read_core_registers_raw([
index_for_reg('control'),
index_for_reg('faultmask'),
index_for_reg('basepri'),
index_for_reg('primask')]) == [0x01, 0x02, 0x03, 0x04]
def test_w_control(self, mockcore):
mockcore.write_core_registers_raw([index_for_reg('control')], [0xaa])
assert mockcore.read_core_registers_raw([index_for_reg('cfbp')]) == [0xaa000000]
def test_w_faultmask(self, mockcore):
mockcore.write_core_registers_raw([index_for_reg('faultmask')], [0xaa])
        assert mockcore.read_core_registers_raw([index_for_reg('cfbp')]) == [0x00aa0000]
def test_w_basepri(self, mockcore):
mockcore.write_core_registers_raw([index_for_reg('basepri')], [0xaa])
        assert mockcore.read_core_registers_raw([index_for_reg('cfbp')]) == [0x0000aa00]
def test_w_primask(self, mockcore):
mockcore.write_core_registers_raw([index_for_reg('primask')], [0xaa])
        assert mockcore.read_core_registers_raw([index_for_reg('cfbp')]) == [0x000000aa]
|
36fb6f2f3d27a0c87a13dedea50c0b178f825421
|
5655a9fa1371274fb9d61bbb652e13eec0595468
|
/runtime/Python3/src/antlr4/Lexer.py
|
82accadcbca680b48f4c0206731259d32adc299e
|
[
"BSD-3-Clause"
] |
permissive
|
antlr/antlr4
|
68f3cbb13eefa1638569fe9f4f2f96e048255cd1
|
e0df58f5185cb3c9148eab724a11394050c565ca
|
refs/heads/dev
| 2023-08-29T14:07:51.178320
| 2023-08-28T15:29:08
| 2023-08-28T16:46:56
| 501,687
| 15,379
| 3,965
|
BSD-3-Clause
| 2023-09-09T22:21:56
| 2010-02-04T01:36:28
|
Java
|
UTF-8
|
Python
| false
| false
| 11,542
|
py
|
Lexer.py
|
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# A lexer is a recognizer that draws input symbols from a character stream.
# Lexer grammars result in a subclass of this object. A Lexer object
# uses simplified match() and error recovery mechanisms in the interest
# of speed.
#/
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
from antlr4.CommonTokenFactory import CommonTokenFactory
from antlr4.atn.LexerATNSimulator import LexerATNSimulator
from antlr4.InputStream import InputStream
from antlr4.Recognizer import Recognizer
from antlr4.Token import Token
from antlr4.error.Errors import IllegalStateException, LexerNoViableAltException, RecognitionException
class TokenSource(object):
pass
class Lexer(Recognizer, TokenSource):
__slots__ = (
'_input', '_output', '_factory', '_tokenFactorySourcePair', '_token',
'_tokenStartCharIndex', '_tokenStartLine', '_tokenStartColumn',
'_hitEOF', '_channel', '_type', '_modeStack', '_mode', '_text'
)
DEFAULT_MODE = 0
MORE = -2
SKIP = -3
DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL
HIDDEN = Token.HIDDEN_CHANNEL
MIN_CHAR_VALUE = 0x0000
MAX_CHAR_VALUE = 0x10FFFF
def __init__(self, input:InputStream, output:TextIO = sys.stdout):
super().__init__()
self._input = input
self._output = output
self._factory = CommonTokenFactory.DEFAULT
self._tokenFactorySourcePair = (self, input)
self._interp = None # child classes must populate this
# The goal of all lexer rules/methods is to create a token object.
# self is an instance variable as multiple rules may collaborate to
# create a single token. nextToken will return self object after
# matching lexer rule(s). If you subclass to allow multiple token
# emissions, then set self to the last token to be matched or
# something nonnull so that the auto token emit mechanism will not
# emit another token.
self._token = None
# What character index in the stream did the current token start at?
# Needed, for example, to get the text for current token. Set at
# the start of nextToken.
self._tokenStartCharIndex = -1
# The line on which the first character of the token resides#/
self._tokenStartLine = -1
# The character position of first character within the line#/
self._tokenStartColumn = -1
# Once we see EOF on char stream, next token will be EOF.
# If you have DONE : EOF ; then you see DONE EOF.
self._hitEOF = False
# The channel number for the current token#/
self._channel = Token.DEFAULT_CHANNEL
# The token type for the current token#/
self._type = Token.INVALID_TYPE
self._modeStack = []
self._mode = self.DEFAULT_MODE
# You can set the text for the current token to override what is in
# the input char buffer. Use setText() or can set self instance var.
#/
self._text = None
def reset(self):
# wack Lexer state variables
if self._input is not None:
self._input.seek(0) # rewind the input
self._token = None
self._type = Token.INVALID_TYPE
self._channel = Token.DEFAULT_CHANNEL
self._tokenStartCharIndex = -1
self._tokenStartColumn = -1
self._tokenStartLine = -1
self._text = None
self._hitEOF = False
self._mode = Lexer.DEFAULT_MODE
self._modeStack = []
self._interp.reset()
# Return a token from self source; i.e., match a token on the char
# stream.
def nextToken(self):
if self._input is None:
raise IllegalStateException("nextToken requires a non-null input stream.")
# Mark start location in char stream so unbuffered streams are
# guaranteed at least have text of current token
tokenStartMarker = self._input.mark()
try:
while True:
if self._hitEOF:
self.emitEOF()
return self._token
self._token = None
self._channel = Token.DEFAULT_CHANNEL
self._tokenStartCharIndex = self._input.index
self._tokenStartColumn = self._interp.column
self._tokenStartLine = self._interp.line
self._text = None
continueOuter = False
while True:
self._type = Token.INVALID_TYPE
ttype = self.SKIP
try:
ttype = self._interp.match(self._input, self._mode)
except LexerNoViableAltException as e:
self.notifyListeners(e) # report error
self.recover(e)
if self._input.LA(1)==Token.EOF:
self._hitEOF = True
if self._type == Token.INVALID_TYPE:
self._type = ttype
if self._type == self.SKIP:
continueOuter = True
break
if self._type!=self.MORE:
break
if continueOuter:
continue
if self._token is None:
self.emit()
return self._token
finally:
# make sure we release marker after match or
# unbuffered char stream will keep buffering
self._input.release(tokenStartMarker)
# Instruct the lexer to skip creating a token for current lexer rule
# and look for another token. nextToken() knows to keep looking when
# a lexer rule finishes with token set to SKIP_TOKEN. Recall that
# if token==null at end of any token rule, it creates one for you
# and emits it.
#/
def skip(self):
self._type = self.SKIP
def more(self):
self._type = self.MORE
def mode(self, m:int):
self._mode = m
def pushMode(self, m:int):
if self._interp.debug:
print("pushMode " + str(m), file=self._output)
self._modeStack.append(self._mode)
self.mode(m)
def popMode(self):
if len(self._modeStack)==0:
raise Exception("Empty Stack")
if self._interp.debug:
print("popMode back to "+ self._modeStack[:-1], file=self._output)
self.mode( self._modeStack.pop() )
return self._mode
# Set the char stream and reset the lexer#/
@property
def inputStream(self):
return self._input
@inputStream.setter
def inputStream(self, input:InputStream):
self._input = None
self._tokenFactorySourcePair = (self, self._input)
self.reset()
self._input = input
self._tokenFactorySourcePair = (self, self._input)
@property
def sourceName(self):
return self._input.sourceName
# By default does not support multiple emits per nextToken invocation
    # for efficiency reasons. Subclass and override this method, nextToken,
    # and getToken (to push tokens into a list and pull from that list
    # rather than a single variable as this implementation does).
#/
def emitToken(self, token:Token):
self._token = token
# The standard method called to automatically emit a token at the
# outermost lexical rule. The token object should point into the
# char buffer start..stop. If there is a text override in 'text',
    # use that to set the token's text. Override this method to emit
# custom Token objects or provide a new factory.
#/
def emit(self):
t = self._factory.create(self._tokenFactorySourcePair, self._type, self._text, self._channel, self._tokenStartCharIndex,
self.getCharIndex()-1, self._tokenStartLine, self._tokenStartColumn)
self.emitToken(t)
return t
def emitEOF(self):
cpos = self.column
lpos = self.line
eof = self._factory.create(self._tokenFactorySourcePair, Token.EOF, None, Token.DEFAULT_CHANNEL, self._input.index,
self._input.index-1, lpos, cpos)
self.emitToken(eof)
return eof
@property
def type(self):
return self._type
@type.setter
def type(self, type:int):
self._type = type
@property
def line(self):
return self._interp.line
@line.setter
def line(self, line:int):
self._interp.line = line
@property
def column(self):
return self._interp.column
@column.setter
def column(self, column:int):
self._interp.column = column
# What is the index of the current character of lookahead?#/
def getCharIndex(self):
return self._input.index
# Return the text matched so far for the current token or any
# text override.
@property
def text(self):
if self._text is not None:
return self._text
else:
return self._interp.getText(self._input)
    # Set the complete text of this token; it wipes any previous
# changes to the text.
@text.setter
def text(self, txt:str):
self._text = txt
# Return a list of all Token objects in input char stream.
# Forces load of all tokens. Does not include EOF token.
#/
def getAllTokens(self):
tokens = []
t = self.nextToken()
while t.type!=Token.EOF:
tokens.append(t)
t = self.nextToken()
return tokens
def notifyListeners(self, e:LexerNoViableAltException):
start = self._tokenStartCharIndex
stop = self._input.index
text = self._input.getText(start, stop)
msg = "token recognition error at: '" + self.getErrorDisplay(text) + "'"
listener = self.getErrorListenerDispatch()
listener.syntaxError(self, None, self._tokenStartLine, self._tokenStartColumn, msg, e)
def getErrorDisplay(self, s:str):
with StringIO() as buf:
for c in s:
buf.write(self.getErrorDisplayForChar(c))
return buf.getvalue()
def getErrorDisplayForChar(self, c:str):
if ord(c[0])==Token.EOF:
return "<EOF>"
elif c=='\n':
return "\\n"
elif c=='\t':
return "\\t"
elif c=='\r':
return "\\r"
else:
return c
def getCharErrorDisplay(self, c:str):
return "'" + self.getErrorDisplayForChar(c) + "'"
    # Lexers can normally match any char in its vocabulary after matching
# a token, so do the easy thing and just kill a character and hope
# it all works out. You can instead use the rule invocation stack
# to do sophisticated error recovery if you are in a fragment rule.
#/
def recover(self, re:RecognitionException):
if self._input.LA(1) != Token.EOF:
if isinstance(re, LexerNoViableAltException):
# skip a char and try again
self._interp.consume(self._input)
else:
# TODO: Do we lose character or line position information?
self._input.consume()
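
# --- Illustrative usage sketch (editor's addition, not part of the ANTLR runtime) ---
# A minimal example of how a generated Lexer subclass is typically driven:
# wrap the source text in an InputStream (already imported above), instantiate
# the lexer, and drain it with getAllTokens(). The concrete lexer class is a
# parameter because any real lexer is produced by the ANTLR tool, not defined here.
def _demo_tokenize(lexer_cls, text: str):
    stream = InputStream(text)
    lexer = lexer_cls(stream)
    # getAllTokens() returns every token up to, but not including, EOF.
    return [(token.type, token.text) for token in lexer.getAllTokens()]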
|
42feddca0aff08ed6d508231c5ada21173232f1a
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/python/tvm/topi/nn/conv2d_transpose.py
|
5638d3d77fd28f17ab491d54c80cbf7a47499c67
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 11,441
|
py
|
conv2d_transpose.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
import collections
from itertools import repeat
import tvm
from tvm import relay, te
from ..utils import simplify
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
assert len(x) == n, f"Input can only have {n} elements, but got {len(x)} instead: {x}."
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
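# A small illustrative check (editor's sketch, not part of TVM): _pair keeps a
# 2-element iterable unchanged and broadcasts a scalar to a 2-tuple, which is
# how stride/padding arguments are normalized by the helpers above.
def _demo_pair_normalization():
    assert _pair((2, 3)) == (2, 3)  # an iterable of the right length is returned as-is
    assert _pair(2) == (2, 2)       # a scalar is repeated into a pair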
def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding):
"""Transposed 2D convolution nchw forward operator.
Parameters
----------
Input : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
Filter : tvm.te.Tensor
4-D with shape [in_channel, num_filter, filter_height, filter_width]
strides : tuple of two ints
The spatial stride along height and width
padding : int or str
Padding size, or ['VALID', 'SAME']
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
return declaration_conv2d_transpose_impl(
Input, Filter, strides, padding, out_dtype, output_padding=output_padding
)
def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
"""Preprocess data and kernel to make the compute pattern
of conv2d_transpose the same as conv2d"""
batch, in_c, in_h, in_w = data.shape
_, out_c, filter_h, filter_w = kernel.shape
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert opad_h < stride_h and opad_w < stride_w
# dilate data
data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
# pad data
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
)
# transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_c, filter_h, filter_w),
lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
return data_pad, kernel_transform
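# --- Illustrative sketch (editor's addition, not part of the TVM operator) ---
# The dilate + pad + flipped-kernel construction above reduces conv2d_transpose
# to an ordinary "valid" convolution, which yields the familiar closed form per
# spatial dimension:
#   out = (in - 1) * stride - pad_before - pad_after + kernel + output_padding
# The helper below just evaluates that arithmetic for quick sanity checks.
def _demo_transpose_out_size(in_size, stride, pad_before, pad_after, kernel, output_padding=0):
    dilated = (in_size - 1) * stride + 1                       # after dilate()
    back_pad = (kernel - 1 - pad_before) + (kernel - 1 - pad_after + output_padding)
    return dilated + back_pad - kernel + 1                     # "valid" convolution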
def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
"""Implementation of conv2d transpose"""
data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(
data, kernel, strides, padding, out_dtype, output_padding
)
batch, in_c, in_h, in_w = data_pad.shape
out_c, _, filter_h, filter_w = kernel_transform.shape
# convolution stage
out_c = simplify(out_c)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_c), name="dc")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
Output = te.compute(
(batch, out_c, out_h, out_w),
lambda b, c, h, w: te.sum(
data_pad[b, dc, h + dh, w + dw].astype(out_dtype)
* kernel_transform[c, dc, dh, dw].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="conv2d_transpose_nchw",
)
return Output
def group_conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding, groups):
"""Group convolution operator in NCHW layout.
Parameters
----------
data : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
kernel : tvm.te.Tensor
4-D with shape [in_channel, out_channel // groups, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or a list/tuple of 2 or 4 ints
padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
out_dtype : str
The output data type. This is used for mixed precision.
output_padding : tuple of ints
Used to get the right output shape for gradients
groups : int
number of groups
out_dtype : str
The output type. This is used for mixed precision.
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
if groups == 1:
return conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding)
    # some pre-processing and preliminary checks
if out_dtype is None:
out_dtype = data.dtype
batch, in_channels, in_h, in_w = data.shape
_, out_c, filter_h, filter_w = kernel.shape
assert (
in_channels % groups == 0
), f"input channels {in_channels} must divide group size {groups}"
# assert out_c % groups == 0, f"output channels {in_c} must divide group size {groups}"
strides = _pair(stride)
# padding = _pair(padding)
# output_padding = _pair(output_padding)
# dilation = _pair(dilation)
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert (
opad_h < stride_h and opad_w < stride_w
), f"[{output_padding}] opad_h:{opad_h} < stride_h:{stride_h} \
        and opad_w:{opad_w} < stride_w:{stride_w} must hold."
# dilate data
data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
# pad data
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
bpad_left = filter_w - 1 - fpad_left
bpad_right = filter_w - 1 - fpad_right + opad_w
data_pad = pad(
data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
)
# transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
kernel_transform = te.compute(
(out_c, in_channels, filter_h, filter_w),
lambda i, o, h, w: kernel[o][i][filter_h - 1 - h][filter_w - 1 - w],
name="kernel_transform",
)
batch, in_channels, in_h, in_w = data_pad.shape
out_c, _, filter_h, filter_w = kernel_transform.shape
# convolution stage
out_channels = simplify(out_c * groups)
out_h = simplify(in_h - filter_h + 1)
out_w = simplify(in_w - filter_w + 1)
dc = te.reduce_axis((0, in_channels // groups), name="dc")
dh = te.reduce_axis((0, filter_h), name="dh")
dw = te.reduce_axis((0, filter_w), name="dw")
# data: batch, in_channels, out_h, out_w
# weight: out_channels // G, in_channels, out_h, out_w
return te.compute(
(batch, out_channels, out_h, out_w),
lambda b, c, h, w: te.sum(
data_pad[
b, c // (out_channels // groups) * (in_channels // groups) + dc, h + dh, w + dw
].astype(out_dtype)
* kernel_transform[
c % (out_channels // groups),
c // (out_channels // groups) * (in_channels // groups) + dc,
dh,
dw,
].astype(out_dtype),
axis=[dc, dh, dw],
),
tag="group_conv2d_transpose_nchw",
)
def layout_transform(tensor: "relay.Expr", current_layout: str, desired_layout: str):
"""Transform a tensor with the current layout to the desired layout.
E.g. layout_transform(t, "NCHW", "CNHW") --> relay.transpose(t, [1, 0, 2, 3])
Parameters
----------
tensor: relay.Expr
The Tensor to transpose
current_layout: str
The current layout e.g. NCHW or OIHW
desired_layout: str
The desired layout, must be compatible with current_layout
Returns
-------
The layout_transformed tensor.
"""
if sorted(current_layout) != sorted(desired_layout):
raise ValueError(f"Incompatible layouts: {current_layout} vs {desired_layout}")
if current_layout == desired_layout:
return tensor
current_layout_map = {c: i for i, c in enumerate(current_layout)}
desired_layout_map = {c: i for i, c in enumerate(desired_layout)}
axes = [None] * len(current_layout)
for c, i in desired_layout_map.items():
axes[i] = current_layout_map[c]
return relay.transpose(tensor, axes=axes)
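# --- Illustrative sketch (editor's addition, not part of the TVM helpers) ---
# layout_transform reduces to computing a permutation of axes; the same
# computation in plain Python makes the mapping easy to verify, e.g.
# NCHW -> NHWC gives [0, 2, 3, 1] and NCHW -> CNHW gives [1, 0, 2, 3].
def _demo_layout_axes(current_layout: str, desired_layout: str):
    current_pos = {c: i for i, c in enumerate(current_layout)}
    # axes[i] answers: which axis of the current layout lands at position i?
    return [current_pos[c] for c in desired_layout]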
@tvm.target.generic_func
def conv2d_transpose_legalize(attrs, inputs, types):
"""Legalizes Transposed 2D convolution op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed 2D convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
data, kernel = inputs
kernel_layout = attrs["kernel_layout"]
target = tvm.target.Target.current(allow_none=True)
if target and "cudnn" in target.libs:
# cuDNN backend can directly operate on NHWC layout.
return None
if attrs["data_layout"] == "NHWC":
kernel = layout_transform(kernel, kernel_layout, "IOHW")
# Set new attrs for conv2d_transpose.
new_attrs = {k: attrs[k] for k in attrs.keys()}
new_attrs["data_layout"] = "NCHW"
# layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
new_attrs["kernel_layout"] = "IOHW"
# Convert data to NCHW.
data = relay.transpose(data, axes=(0, 3, 1, 2))
deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)
# Convert back to original NHWC layout.
out = relay.transpose(deconv, axes=(0, 2, 3, 1))
return out
if attrs["data_layout"] == "NCHW":
kernel = layout_transform(kernel, kernel_layout, "IOHW")
new_attrs = {k: attrs[k] for k in attrs.keys()}
# layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
new_attrs["kernel_layout"] = "IOHW"
return relay.nn.conv2d_transpose(data, kernel, **new_attrs)
return None
|
3934e0247709b3f33ab4712963891f925d3436ad
|
dc53b4b9a12875e832573f31906e889ae2719e8c
|
/src/train_utils.py
|
f3db39c75c7580034033d163591c93f3124e08a5
|
[] |
no_license
|
rezazad68/Deep-Intervertebral-Disc-Labeling
|
be7601357df68a1a79b3ff8f576aed6456e33acd
|
fb310ee1515d7e55e1d6ebf3af84d9c2bad1d520
|
refs/heads/main
| 2023-08-30T23:34:56.527755
| 2023-08-24T21:42:26
| 2023-08-24T21:42:26
| 394,945,794
| 227
| 4
| null | 2021-08-12T16:25:08
| 2021-08-11T10:10:31
|
Python
|
UTF-8
|
Python
| false
| false
| 15,822
|
py
|
train_utils.py
|
# Author: Lucas
# Copyright (c) 2020 Polytechnique Montreal <www.neuro.polymtl.ca>
# Revised by Reza Azad (several functions added)
from torchvision import transforms
import numpy as np
from torch.utils.data import Dataset
# from Data2array import *
import matplotlib.pyplot as plt
import PIL
from transform_spe import *
import skimage
import cv2
from scipy import signal
import torch
# normalize Image
def normalize(arr):
ma = arr.max()
mi = arr.min()
return ((arr - mi) / (ma - mi))
# Useful function to generate a Gaussian function at given coordinates. Used to generate the ground truth.
def label2MaskMap_GT(data, shape, c_dx=0, c_dy=0, radius=5, normalize=False):
"""
Generate a Mask map from the coordenates
:param shape: dimension of output
:param data : input image
:param radius: is the radius of the gaussian function
:param normalize : bool for normalization.
:return: a MxN normalized array
"""
# Our 2-dimensional distribution will be over variables X and Y
(M, N) = (shape[2], shape[1])
if len(data) <= 2:
        # Coordinates may arrive with only two values after post-processing; pad with a leading zero so the indexing below works
data = [0, data[0], data[1]]
maskMap = []
x, y = data[2], data[1]
# Correct the labels
x += c_dx
y += c_dy
X = np.linspace(0, M - 1, M)
Y = np.linspace(0, N - 1, N)
X, Y = np.meshgrid(X, Y)
# Pack X and Y into a single 3-dimensional array
pos = np.empty(X.shape + (2,))
pos[:, :, 0] = X
pos[:, :, 1] = Y
# Mean vector and covariance matrix
mu = np.array([x, y])
Sigma = np.array([[radius, 0], [0, radius]])
# The distribution on the variables X, Y packed into pos.
Z = multivariate_gaussian(pos, mu, Sigma)
# Normalization
if normalize:
Z *= (1 / np.max(Z))
else:
        # 8-bit image values (otherwise the loss goes to +inf)
Z *= (1 / np.max(Z))
Z = np.asarray(Z * 255, dtype=np.uint8)
maskMap.append(Z)
if len(maskMap) == 1:
maskMap = maskMap[0]
return np.asarray(maskMap)
def extract_all(list_coord_label, shape_im=(1, 150, 200)):
"""
    Create the ground truth by generating a Gaussian function for every ground-truth point of a single image
:param list_coord_label: list of ground truth coordinates
:param shape_im: shape of output image with zero padding
:return: a 2d heatmap image.
"""
shape_tmp = (1, shape_im[0], shape_im[1])
final = np.zeros(shape_tmp)
for x in list_coord_label:
train_lbs_tmp_mask = label2MaskMap_GT(x, shape_tmp)
for w in range(shape_im[0]):
for h in range(shape_im[1]):
final[0, w, h] = max(final[0, w, h], train_lbs_tmp_mask[w, h])
return (final)
def extract_groundtruth_heatmap(DataSet):
"""
    Loop across images to create the dataset of ground-truth heatmaps and input images for training
    :param DataSet: An array containing [images, GT coordinates]
:return: an array containing [image, heatmap]
"""
[train_ds_img, train_ds_label] = DataSet
tmp_train_labels = [0 for i in range(len(train_ds_label))]
tmp_train_img = [0 for i in range(len(train_ds_label))]
train_ds_img = np.array(train_ds_img)
for i in range(len(train_ds_label)):
final = extract_all(train_ds_label[i], shape_im=train_ds_img[0].shape)
tmp_train_labels[i] = normalize(final[0, :, :])
tmp_train_labels = np.array(tmp_train_labels)
for i in range(len(train_ds_img)):
print(train_ds_img[i].shape)
tmp_train_img[i] = (normalize(train_ds_img[i][:, :, 0]))
tmp_train_labels = np.expand_dims(tmp_train_labels, axis=-1)
tmp_train_img = np.expand_dims(train_ds_img, axis=-1)
return [tmp_train_img, tmp_train_labels]
class image_Dataset(Dataset):
def __init__(self, image_paths, target_paths, use_flip = True): # initial logic happens like transform
self.image_paths = image_paths
self.target_paths = target_paths
self.num_vis_joints = []
self.use_flip = use_flip
@staticmethod
def rotate_img(img):
img = np.rot90(img)
#img = np.flip(img, axis=0)
img = np.flip(img, axis=1)
return img
def get_posedata(self, img, msk, num_ch=11):
msk = msk[:, :, 0]
msk = self.rotate_img(msk)
#img = self.rotate_img(img)
ys = msk.shape
ys_ch = np.zeros([ys[0], ys[1], num_ch])
msk_uint = np.uint8(np.where(msk >0.2, 1, 0))
num_labels, labels_im = cv2.connectedComponents(msk_uint)
self.num_vis_joints.append(num_labels-1)
try:
# the <0> label is the background
for i in range(1, num_labels):
y_i = msk * np.where(labels_im == i, 1, 0)
ys_ch[:,:, i-1] = y_i
        except Exception:
print(num_labels)
# plt.imshow(img, cmap='gray')
# plt.savefig('spine/img.png')
# plt.imshow(msk, cmap='gray')
# plt.savefig('spine/mks.png')
# plt.imshow(msk_uint, cmap='gray')
# plt.savefig('spine/msk_uint.png')
ys_ch = np.rot90(ys_ch)
ys_ch = np.flip(ys_ch, axis=1)
vis = np.zeros((num_ch, 1))
vis[:num_labels-1] = 1
return img, ys_ch, vis
@staticmethod
def bluring2D(data, kernel_halfsize=3, sigma=1.0):
x = np.arange(-kernel_halfsize,kernel_halfsize+1,1)
y = np.arange(-kernel_halfsize,kernel_halfsize+1,1)
xx, yy = np.meshgrid(x,y)
kernel = np.exp(-(xx**2 + yy**2)/(2*sigma**2))
filtered = signal.convolve(data, kernel, mode="same")
return filtered
def transform(self, image, mask):
#print(image.shape)
image = normalize(image[:, :, 0])
#image = skimage.exposure.equalize_adapthist(image, kernel_size=10, clip_limit=0.02)
image = np.expand_dims(image, -1)
## extract joints for pose model
# Random horizontal flipping
if self.use_flip:
image, mask = RandomHorizontalFlip()(image, mask)
# Random vertical flipping
# image,mask = RandomVerticalFlip()(image,mask)
# random90 flipping
temp_img = np.zeros((image.shape[0], image.shape[1], 3))
temp_img[:,:,0:1]= image
temp_img[:,:,1:2]= image
temp_img[:,:,2:3]= image
image = temp_img
# image,mask = RandomangleFlip()(image,mask)
# Transform to tensor
image, mask = ToTensor()(image, mask)
return image, mask
def __getitem__(self, index):
mask = self.target_paths[index]
# maxval = np.max(np.max(mask))
# mask = np.where(mask==maxval, 1, 0)
# mask = self.bluring2D(mask[:,:,0], kernel_halfsize=15, sigma=3)
# mask /= np.max(np.max(mask))
# plt.imshow(mask, cmap='gray')
# plt.savefig('spine/mask.png')
mask = cv2.resize(mask, (256, 256))
mask = mask.astype(np.float32)
mask = np.expand_dims(mask, axis= -1)
image = self.image_paths[index]
image = cv2.resize(image, (256, 256))
image = image.astype(np.float32)
image = np.expand_dims(image, axis= -1)
image, mask, vis = self.get_posedata(image, mask, num_ch=11)
t_image, t_mask = self.transform(image, mask)
vis = torch.FloatTensor(vis)
return t_image, t_mask, vis
def __len__(self): # return count of sample we have
return len(self.image_paths)
class HeatmapLoss(torch.nn.Module):
"""
loss for detection heatmap
"""
def __init__(self):
super(HeatmapLoss, self).__init__()
def forward(self, pred, gt):
l = ((pred - gt)**2)
l = l.mean(dim=3).mean(dim=2).mean(dim=1)
return l ## l of dim bsize
def save_epoch_res_as_image(inputs, targets, epoch_num, flag_gt):
targets = targets.data.cpu().numpy()
inputs = inputs.data.cpu().numpy()
if not(flag_gt):
targets[np.where(targets<0.5)] = 0
hues = np.linspace(0, 179, targets.shape[1], dtype=np.uint8)
blank_ch = 255*np.ones_like(targets[0, 0], dtype=np.uint8)
for y, x in zip(targets, inputs):
y_colored = np.zeros([y.shape[1], y.shape[2], 3], dtype=np.uint8)
y_all = np.zeros([y.shape[1], y.shape[2]], dtype=np.uint8)
for ych, hue_i in zip(y, hues):
ych = ych/np.max(np.max(ych))
ych[np.where(ych<0.5)] = 0
# ych = cv2.GaussianBlur(ych,(15,15),cv2.BORDER_DEFAULT)
ych_hue = np.ones_like(ych, dtype=np.uint8)*hue_i
ych = np.uint8(255*ych/np.max(ych))
colored_ych = np.zeros_like(y_colored, dtype=np.uint8)
colored_ych[:, :, 0] = ych_hue
colored_ych[:, :, 1] = blank_ch
colored_ych[:, :, 2] = ych
colored_y = cv2.cvtColor(colored_ych, cv2.COLOR_HSV2BGR)
y_colored += colored_y
y_all += ych
x = np.moveaxis(x, 0, -1)
x = x/np.max(x)*255
x_3ch = np.zeros([x.shape[0], x.shape[1], 3])
for i in range(3):
x_3ch[:, :, i] = x[:, :, 0]
img_mix = np.uint8(x_3ch*0.5 + y_colored*0.5)
txt = ''
if flag_gt:
txt = f'../visualize/epo_{epoch_num:3d}_gt.png'
else:
txt = f'../visualize/epo_{epoch_num:3d}_pr.png'
cv2.imwrite(txt, img_mix)
break
from torchvision.utils import make_grid
def save_epoch_res_as_image2(inputs, outputs, targets, epoch_num, target_th=0.4, pretext=False):
max_epoch = 500
target_th = target_th + (epoch_num/max_epoch*0.2)
targets = targets.data.cpu().numpy()
outputs = outputs.data.cpu().numpy()
inputs = inputs.data.cpu().numpy()
clr_vis_Y = []
hues = np.linspace(0, 179, targets.shape[1], dtype=np.uint8)
blank_ch = 255*np.ones_like(targets[0, 0], dtype=np.uint8)
for Y in [targets, outputs]:
for y, x in zip(Y, inputs):
y_colored = np.zeros([y.shape[1], y.shape[2], 3], dtype=np.uint8)
y_all = np.zeros([y.shape[1], y.shape[2]], dtype=np.uint8)
for ych, hue_i in zip(y, hues):
ych = ych/np.max(np.max(ych))
ych[np.where(ych<target_th)] = 0
# ych = cv2.GaussianBlur(ych,(15,15),cv2.BORDER_DEFAULT)
ych_hue = np.ones_like(ych, dtype=np.uint8)*hue_i
ych = np.uint8(255*ych/np.max(ych))
colored_ych = np.zeros_like(y_colored, dtype=np.uint8)
colored_ych[:, :, 0] = ych_hue
colored_ych[:, :, 1] = blank_ch
colored_ych[:, :, 2] = ych
colored_y = cv2.cvtColor(colored_ych, cv2.COLOR_HSV2BGR)
y_colored += colored_y
y_all += ych
x = np.moveaxis(x, 0, -1)
x = x/np.max(x)*255
x_3ch = np.zeros([x.shape[0], x.shape[1], 3])
for i in range(3):
x_3ch[:, :, i] = x[:, :, 0]
img_mix = np.uint8(x_3ch*0.5 + y_colored*0.5)
# img_mix = cv2.cvtColor(img_mix, cv2.COLOR_BGR2RGB)
clr_vis_Y.append(img_mix)
t = np.array(clr_vis_Y)
t = np.transpose(t, [0, 3, 1, 2])
trgts = make_grid(torch.Tensor(t), nrow=4)
if pretext:
txt = f'../visualize/{epoch_num:0=4d}_test_result.png'
else:
txt = f'../visualize/epoch_{epoch_num:0=4d}_res2.png'
res = np.transpose(trgts.numpy(), (1,2,0))
print(res.shape)
cv2.imwrite(txt, res)
# plt.imshow()
# plt.savefig(txt)
# cv2.imwrite(txt, )
def multivariate_gaussian(pos, mu, Sigma):
"""
Return the multivariate Gaussian distribution on array.
pos is an array constructed by packing the meshed arrays of variables
x_1, x_2, x_3, ..., x_k into its _last_ dimension.
"""
n = mu.shape[0]
Sigma_det = np.linalg.det(Sigma)
Sigma_inv = np.linalg.inv(Sigma)
N = np.sqrt((2 * np.pi) ** n * Sigma_det)
# This einsum call calculates (x-mu)T.Sigma-1.(x-mu) in a vectorized
# way across all the input variables.
fac = np.einsum('...k,kl,...l->...', pos - mu, Sigma_inv, pos - mu)
return np.exp(-fac / 2) / N
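# --- Illustrative sketch (editor's addition, not part of the training utilities) ---
# A minimal check of multivariate_gaussian: for an isotropic 2-D Gaussian the
# density evaluated at its mean is 1 / (2 * pi * sigma^2), which this helper
# returns alongside the value computed above so the two can be compared.
def _demo_gaussian_peak(sigma=1.0):
    pos = np.zeros((1, 1, 2))                          # a single (x, y) = (0, 0) sample
    mu = np.array([0.0, 0.0])
    Sigma = np.array([[sigma ** 2, 0.0], [0.0, sigma ** 2]])
    peak = multivariate_gaussian(pos, mu, Sigma)[0, 0]
    return peak, 1.0 / (2.0 * np.pi * sigma ** 2)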
class SaveOutput:
def __init__(self):
self.outputs = []
def __call__(self, module, module_in, module_out):
self.outputs.append(module_out)
def clear(self):
self.outputs = []
import math
def sigmoid(x):
x = np.array(x)
x = 1/(1+np.exp(-x))
x[x<0.0] = 0
return x
# return np.where(1/(1+np.exp(-x))>0.5, 1., 0.)
import copy
def save_attention(inputs, outputs, targets, att, target_th=0.5):
targets = targets.data.cpu().numpy()
outputs = outputs.data.cpu().numpy()
inputs = inputs.data.cpu().numpy()
att = att.detach().to('cpu')
att = torch.sigmoid(att).numpy()
att = np.uint8(att*255)
att[att<128+80] = 0
# att = sigmoid(att)
att = cv2.resize(att, (256, 256))
att = cv2.applyColorMap(att, cv2.COLORMAP_JET)
rgbatt = copy.copy(inputs[0])
rgbatt = np.moveaxis(rgbatt, 0, -1)
rgbatt = rgbatt*255*0.5+ att*0.5
# at2[:, :, 0] = att
# at2[:, :, 1] = att
# at2[:, :, 2] = att
# at2 = cv2.applyColorMap(np.uint8(at2*255), cv2.COLORMAP_JET)
# rgbatt = at2
# rgbatt = cv2.addWeighted(rgbatt, 0.7, att, 0.3, 0)
clr_vis_Y = []
hues = np.linspace(0, 179, targets.shape[1], dtype=np.uint8)
blank_ch = 255*np.ones_like(targets[0, 0], dtype=np.uint8)
for Y in [targets, outputs]:
for y, x in zip(Y, inputs):
y_colored = np.zeros([y.shape[1], y.shape[2], 3], dtype=np.uint8)
y_all = np.zeros([y.shape[1], y.shape[2]], dtype=np.uint8)
for ych, hue_i in zip(y, hues):
ych = ych/np.max(np.max(ych))
ych[np.where(ych<target_th)] = 0
# ych = cv2.GaussianBlur(ych,(15,15),cv2.BORDER_DEFAULT)
ych_hue = np.ones_like(ych, dtype=np.uint8)*hue_i
ych = np.uint8(255*ych/np.max(ych))
colored_ych = np.zeros_like(y_colored, dtype=np.uint8)
colored_ych[:, :, 0] = ych_hue
colored_ych[:, :, 1] = blank_ch
colored_ych[:, :, 2] = ych
colored_y = cv2.cvtColor(colored_ych, cv2.COLOR_HSV2BGR)
y_colored += colored_y
y_all += ych
x = np.moveaxis(x, 0, -1)
x = x/np.max(x)*255
x_3ch = np.zeros([x.shape[0], x.shape[1], 3])
for i in range(3):
x_3ch[:, :, i] = x[:, :, 0]
img_mix = np.uint8(x_3ch*0.5 + y_colored*0.5)
# img_mix = cv2.cvtColor(img_mix, cv2.COLOR_BGR2RGB)
clr_vis_Y.append(img_mix)
clr_vis_Y.append(rgbatt)
t = np.array(clr_vis_Y)
t = np.transpose(t, [0, 3, 1, 2])
trgts = make_grid(torch.Tensor(t), nrow=4)
txt = '../visualize/attention_visualization.png'
res = np.transpose(trgts.numpy(), (1,2,0))
cv2.imwrite(txt, res)
# plt.imshow()
# plt.savefig(txt)
# cv2.imwrite(txt, )
|
f4d4f4d94f9834b20acd8c3826c25f053dc23c15
|
09c46a2414e674d8631731d58c2c17f8268b5a60
|
/srv/fluffi/data/fluffiweb/config.py
|
edd0f627964a732beefa05d26583096cb7dec503
|
[
"MIT"
] |
permissive
|
siemens/fluffi
|
49ea030e4411ed02e60d3a5394c0a32651450598
|
70cd18deff9d688e10ec71a1eb4f38d7f2d581c0
|
refs/heads/master
| 2022-09-10T00:23:12.051619
| 2022-08-19T13:41:58
| 2022-08-19T13:41:58
| 209,495,395
| 102
| 24
|
MIT
| 2023-02-19T10:45:36
| 2019-09-19T07:55:43
|
C++
|
UTF-8
|
Python
| false
| false
| 2,787
|
py
|
config.py
|
# Copyright 2017-2020 Siemens AG
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Author(s): Junes Najah, Thomas Riedmaier, Pascal Eckmann
import os
import socket
def getHostByNameHandler(name):
try:
host = socket.gethostbyname(name)
    except OSError:
host = name
return host
BASEDIR = os.path.abspath(os.path.dirname(__file__))
LOCAL_DEV = False
if LOCAL_DEV:
# set to True if you want to run it on linux with database, ftp and polemarch container
WITH_CONTAINERS = False
DBUSER = "fluffi_gm" if WITH_CONTAINERS else "root"
DBPASS = "fluffi_gm" if WITH_CONTAINERS else "toor"
DBHOST = "localhost"
POLE_URL = "http://localhost:8888/api/v2/"
FTP_URL = getHostByNameHandler('localhost')
else:
DBUSER = "fluffi_gm"
DBPASS = "fluffi_gm"
DBHOST = getHostByNameHandler('db.fluffi')
POLE_URL = "http://pole.fluffi:8888/api/v2/"
FTP_URL = getHostByNameHandler('ftp.fluffi')
SQLALCHEMY_DATABASE_URI = "mysql://{}:{}@{}/fluffi_gm".format(DBUSER, DBPASS, DBHOST)
DBFILE = "sql_files/createLMDB.sql"
DBPREFIX = "fluffi_"
DEFAULT_DBNAME = "information_schema"
MQTT_HOST = getHostByNameHandler('mon.fluffi')
MQTT_PORT = 1883
SQLALCHEMY_TRACK_MODIFICATIONS = False
WTF_CSRF_ENABLED = True
SECRET_KEY = "asdf"
BOOTSTRAP_SERVE_LOCAL = True
SEND_FILE_MAX_AGE_DEFAULT = 0
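# --- Illustrative sketch (editor's addition, not part of the FLUFFI configuration) ---
# The database URI above is assembled with str.format; the same pattern with
# placeholder credentials (purely hypothetical values) looks like this:
def _demo_database_uri(user="user", password="secret", host="localhost"):
    return "mysql://{}:{}@{}/fluffi_gm".format(user, password, host)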
|
b0b65d5cf13188b8d6c924a7f22bf7202d3436e9
|
eca37de3be161edfeff5e10a88e699b174572d30
|
/kymatio/frontend/entry.py
|
7bb8190040d7bc6dbb0071e6d47d9bd5a5b359ec
|
[
"BSD-3-Clause"
] |
permissive
|
kymatio/kymatio
|
e9cc782673dcb0fe72a14c673ff547996bec87aa
|
7e4099539c5c59eea8a76edcd5c940489e8d6cee
|
refs/heads/main
| 2023-08-21T22:18:28.591466
| 2022-06-12T02:03:25
| 2023-07-05T01:21:41
| 151,186,173
| 680
| 153
|
BSD-3-Clause
| 2023-09-11T21:43:35
| 2018-10-02T01:45:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
entry.py
|
import logging
import warnings
import importlib
class ScatteringEntry(object):
def __init__(self, *args, **kwargs):
self.name = kwargs['name']
self.class_name = kwargs['class_name']
kwargs.pop('name')
kwargs.pop('class_name')
frontend_suffixes = {'torch' : 'Torch',
'numpy' : 'NumPy',
'tensorflow' : 'TensorFlow',
'jax' : 'Jax',
'keras': 'Keras',
'sklearn': 'Transformer'}
if 'frontend' not in kwargs:
frontend = 'numpy'
else:
frontend = kwargs['frontend'].lower()
kwargs.pop('frontend')
frontends = list(frontend_suffixes.keys())
if frontend not in frontends:
            raise RuntimeError('The frontend \'%s\' is not valid. Must be '
'one of \'%s\', or \'%s\'.' %
(frontend, '\', \''.join(frontends[:-1]),
frontends[-1]))
try:
module = importlib.import_module('kymatio.' + self.class_name + '.frontend.' + frontend + '_frontend')
# Create frontend-specific class name by inserting frontend name
# in lieu of "Entry"
frontend = frontend_suffixes[frontend]
class_name = self.__class__.__name__
class_name = class_name.replace("Entry", frontend)
self.__class__ = getattr(module, class_name)
self.__init__(*args, **kwargs)
except Exception as e:
raise e from RuntimeError('\nThe frontend \'' + frontend + '\' could not be correctly imported.')
logging.info('The ' + self.name + ' frontend ' + frontend + ' was imported.')
__all__ = ['ScatteringEntry']
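
# --- Illustrative sketch (editor's addition, not part of kymatio) ---
# The dispatch above swaps the "Entry" suffix of the class name for a
# frontend-specific one, e.g. a hypothetical "Scattering2DEntry" with the
# "numpy" frontend would resolve to "Scattering2DNumPy". The helper below
# mirrors only that renaming step.
def _demo_frontend_class_name(entry_class_name: str, frontend: str) -> str:
    suffixes = {'torch': 'Torch', 'numpy': 'NumPy', 'tensorflow': 'TensorFlow',
                'jax': 'Jax', 'keras': 'Keras', 'sklearn': 'Transformer'}
    return entry_class_name.replace("Entry", suffixes[frontend.lower()])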
|
6c708ad0830ad2b26f14719411a9b95cdce9cfaa
|
8af4e173ab3be9b9fc5cf1b61dbb5da80234d5c7
|
/tests/integration/unit_test/test_unit_test_java8_al2.py
|
c5336b1b98bbd67dca7518128ec41e8ca2420157
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-sam-cli-app-templates
|
d464da1665d84eda9f427f682b985538827d41b6
|
88380eb265d58c496ea80685d4a5701e3cfc13d2
|
refs/heads/master
| 2023-09-04T13:03:00.204479
| 2023-08-23T20:43:24
| 2023-08-23T20:43:24
| 211,362,544
| 354
| 230
|
Apache-2.0
| 2023-09-14T15:39:09
| 2019-09-27T16:42:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
test_unit_test_java8_al2.py
|
from unittest import skip
from tests.integration.unit_test.unit_test_base import UnitTestBase
class UnitTest_java8_al2_cookiecutter_aws_sam_hello_java_gradle(UnitTestBase.JavaUnitTestGradleBase):
directory = "java8.al2/hello-gradle"
code_directories = ["HelloWorldFunction"]
class UnitTest_java8_al2_cookiecutter_aws_sam_hello_java_maven(UnitTestBase.JavaUnitTestMavenBase):
directory = "java8.al2/hello-maven"
code_directories = ["HelloWorldFunction"]
class UnitTest_java8_al2_cookiecutter_aws_sam_hello_java_powertools_maven(UnitTestBase.JavaUnitTestMavenBase):
directory = "java8.al2/hello-pt-maven"
code_directories = ["HelloWorldFunction"]
class UnitTest_java8_al2_cookiecutter_aws_sam_eventbridge_hello_java_gradle(UnitTestBase.JavaUnitTestGradleBase):
directory = "java8.al2/event-bridge-gradle"
code_directories = ["HelloWorldFunction"]
class UnitTest_java8_al2_cookiecutter_aws_sam_eventbridge_hello_java_maven(UnitTestBase.JavaUnitTestMavenBase):
directory = "java8.al2/event-bridge-maven"
code_directories = ["HelloWorldFunction"]
@skip("eventbridge schema app requires credential to pull missing files, skip")
class UnitTest_java8_al2_cookiecutter_aws_sam_eventbridge_schema_app_java_gradle(UnitTestBase.JavaUnitTestGradleBase):
directory = "java8.al2/event-bridge-schema-gradle"
code_directories = ["HelloWorldFunction"]
@skip("eventbridge schema app requires credential to pull missing files, skip")
class UnitTest_java8_al2_cookiecutter_aws_sam_eventbridge_schema_app_java_maven(UnitTestBase.JavaUnitTestMavenBase):
directory = "java8.al2/event-bridge-schema-maven"
code_directories = ["HelloWorldFunction"]
class UnitTest_java8_al2_cookiecutter_aws_sam_step_functions_sample_app_gradle(UnitTestBase.JavaUnitTestGradleBase):
directory = "java8.al2/step-func-gradle"
code_directories = [
"functions/StockBuyer",
"functions/StockChecker",
"functions/StockSeller",
]
class UnitTest_java8_al2_cookiecutter_aws_sam_step_functions_sample_app_maven(UnitTestBase.JavaUnitTestMavenBase):
directory = "java8.al2/step-func-maven"
code_directories = [
"functions/StockBuyer",
"functions/StockChecker",
"functions/StockSeller",
]
|
3f0eb6bdf6f75da6be739f5f749ddd5beb622ff6
|
fdb9bdc6c4ab2f14ba71e544493706d5e275899f
|
/fhir/resources/R4B/tests/test_healthcareservice.py
|
69e3e87d8dce92f30d2d86dd708b0298a560dc1e
|
[
"BSD-3-Clause"
] |
permissive
|
nazrulworld/fhir.resources
|
6ae8aea8180c611b0c5050759c6dcdf63e4cb061
|
1fd6ea476b27b3fcb8c4ef8f23bc51cf161e69e3
|
refs/heads/main
| 2023-08-30T18:27:27.277249
| 2023-07-03T19:57:06
| 2023-07-03T19:57:06
| 165,297,877
| 256
| 83
|
NOASSERTION
| 2023-08-24T15:34:05
| 2019-01-11T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 5,989
|
py
|
test_healthcareservice.py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/HealthcareService
Release: R4B
Version: 4.3.0
Build ID: c475c22
Last updated: 2022-05-28T12:47:40.239+10:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import healthcareservice
def impl_healthcareservice_1(inst):
assert inst.active is True
assert inst.appointmentRequired is False
assert (
inst.availabilityExceptions
== "Reduced capacity is available during the Christmas period"
)
assert inst.availableTime[0].allDay is True
assert inst.availableTime[0].daysOfWeek[0] == "wed"
assert inst.availableTime[1].availableEndTime == fhirtypes.Time.validate("05:30:00")
assert inst.availableTime[1].availableStartTime == fhirtypes.Time.validate(
"08:30:00"
)
assert inst.availableTime[1].daysOfWeek[0] == "mon"
assert inst.availableTime[1].daysOfWeek[1] == "tue"
assert inst.availableTime[1].daysOfWeek[2] == "thu"
assert inst.availableTime[1].daysOfWeek[3] == "fri"
assert inst.availableTime[2].availableEndTime == fhirtypes.Time.validate("04:30:00")
assert inst.availableTime[2].availableStartTime == fhirtypes.Time.validate(
"09:30:00"
)
assert inst.availableTime[2].daysOfWeek[0] == "sat"
assert inst.availableTime[2].daysOfWeek[1] == "fri"
assert inst.category[0].coding[0].code == "8"
assert inst.category[0].coding[0].display == "Counselling"
assert (
inst.category[0].coding[0].system
== "http://terminology.hl7.org/CodeSystem/service-category"
)
assert inst.category[0].text == "Counselling"
assert inst.characteristic[0].coding[0].display == "Wheelchair access"
assert inst.comment == (
"Providing Specialist psychology services to the greater Den "
"Burg area, many years of experience dealing with PTSD issues"
)
assert inst.contained[0].id == "DenBurg"
assert inst.coverageArea[0].display == "Greater Denburg area"
assert inst.coverageArea[0].reference == "#DenBurg"
assert inst.eligibility[0].code.coding[0].display == "DVA Required"
assert inst.eligibility[0].comment == (
"Evidence of application for DVA status may be sufficient for"
" commencing assessment"
)
assert inst.endpoint[0].reference == "Endpoint/example"
assert inst.id == "example"
assert inst.identifier[0].system == "http://example.org/shared-ids"
assert inst.identifier[0].value == "HS-12"
assert inst.location[0].reference == "Location/1"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.name == "Consulting psychologists and/or psychology services"
assert inst.notAvailable[0].description == "Christmas/Boxing Day"
assert inst.notAvailable[0].during.end == fhirtypes.DateTime.validate("2015-12-26")
assert inst.notAvailable[0].during.start == fhirtypes.DateTime.validate(
"2015-12-25"
)
assert inst.notAvailable[1].description == "New Years Day"
assert inst.notAvailable[1].during.end == fhirtypes.DateTime.validate("2016-01-01")
assert inst.notAvailable[1].during.start == fhirtypes.DateTime.validate(
"2016-01-01"
)
assert inst.program[0].text == "PTSD outreach"
assert inst.providedBy.display == "Burgers University Medical Center"
assert inst.providedBy.reference == "Organization/f001"
assert inst.referralMethod[0].coding[0].code == "phone"
assert inst.referralMethod[0].coding[0].display == "Phone"
assert inst.referralMethod[1].coding[0].code == "fax"
assert inst.referralMethod[1].coding[0].display == "Fax"
assert inst.referralMethod[2].coding[0].code == "elec"
assert inst.referralMethod[2].coding[0].display == "Secure Messaging"
assert inst.referralMethod[3].coding[0].code == "semail"
assert inst.referralMethod[3].coding[0].display == "Secure Email"
assert inst.serviceProvisionCode[0].coding[0].code == "cost"
assert inst.serviceProvisionCode[0].coding[0].display == "Fees apply"
assert inst.serviceProvisionCode[0].coding[0].system == (
"http://terminology.hl7.org/CodeSystem/service-provision-" "conditions"
)
assert inst.specialty[0].coding[0].code == "47505003"
assert inst.specialty[0].coding[0].display == "Posttraumatic stress disorder"
assert inst.specialty[0].coding[0].system == "http://snomed.info/sct"
assert inst.telecom[0].system == "phone"
assert inst.telecom[0].use == "work"
assert inst.telecom[0].value == "(555) silent"
assert inst.telecom[1].system == "email"
assert inst.telecom[1].use == "work"
assert inst.telecom[1].value == "directaddress@example.com"
assert inst.text.status == "generated"
assert inst.type[0].coding[0].code == "394913002"
assert inst.type[0].coding[0].display == "Psychotherapy"
assert inst.type[0].coding[0].system == "http://snomed.info/sct"
assert inst.type[1].coding[0].code == "394587001"
assert inst.type[1].coding[0].display == "Psychiatry"
assert inst.type[1].coding[0].system == "http://snomed.info/sct"
def test_healthcareservice_1(base_settings):
"""No. 1 tests collection for HealthcareService.
Test File: healthcareservice-example.json
"""
filename = base_settings["unittest_data_dir"] / "healthcareservice-example.json"
inst = healthcareservice.HealthcareService.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "HealthcareService" == inst.resource_type
impl_healthcareservice_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "HealthcareService" == data["resourceType"]
inst2 = healthcareservice.HealthcareService(**data)
impl_healthcareservice_1(inst2)
|
94a2322a14875d301e85c01629ab6c98caf9646f
|
7b7c570b30d6d7a0e9b904c7cb378cfb0d0f0e07
|
/tests/tensorflow/test_keras_model_export.py
|
5e32051d837b56efa8eada21029972d25863ac31
|
[
"Apache-2.0"
] |
permissive
|
mlflow/mlflow
|
ca97bfbbf32f8e59f454e428f5e46eb3d34d062f
|
37298ffafcd34002352d01d579d4524790544267
|
refs/heads/master
| 2023-09-01T13:15:53.902815
| 2023-09-01T09:00:42
| 2023-09-01T09:00:42
| 136,202,695
| 14,102
| 3,748
|
Apache-2.0
| 2023-09-14T21:52:42
| 2018-06-05T16:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 30,059
|
py
|
test_keras_model_export.py
|
import json
import os
import pickle
import random
import shutil
from pathlib import Path
from unittest import mock
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
import yaml
from packaging.version import Version
# pylint: disable=no-name-in-module
from sklearn import datasets
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Layer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import SGD
import mlflow
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.deployments import PredictionsResponse
from mlflow.models import Model, ModelSignature
from mlflow.models.utils import _read_example
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types.schema import Schema, TensorSpec
from mlflow.utils.conda import get_or_create_conda_env
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from tests.helper_functions import (
PROTOBUF_REQUIREMENT,
_assert_pip_requirements,
_compare_conda_env_requirements,
_compare_logged_code_paths,
_is_available_on_pypi,
_is_importable,
_mlflow_major_version_string,
assert_array_almost_equal,
assert_register_model_called_with_local_model_path,
pyfunc_serve_and_score_model,
)
from tests.pyfunc.test_spark import score_model_as_udf
from tests.tensorflow.test_load_saved_tensorflow_estimator import ModelDataInfo
EXTRA_PYFUNC_SERVING_TEST_ARGS = (
[] if _is_available_on_pypi("tensorflow") else ["--env-manager", "local"]
)
extra_pip_requirements = (
[PROTOBUF_REQUIREMENT] if Version(tf.__version__) < Version("2.6.0") else []
)
@pytest.fixture(scope="module", autouse=True)
def fix_random_seed():
SEED = 0
os.environ["PYTHONHASHSEED"] = str(SEED)
random.seed(SEED)
np.random.seed(SEED)
if Version(tf.__version__) >= Version("2.0.0"):
tf.random.set_seed(SEED)
else:
tf.set_random_seed(SEED)
@pytest.fixture(scope="module")
def data():
return datasets.load_iris(return_X_y=True)
def get_model(data):
x, y = data
model = Sequential()
model.add(Dense(3, input_dim=4))
model.add(Dense(1))
# Use a small learning rate to prevent exploding gradients which may produce
# infinite prediction values
lr = 0.001
kwargs = (
# `lr` was renamed to `learning_rate` in keras 2.3.0:
# https://github.com/keras-team/keras/releases/tag/2.3.0
{"lr": lr}
if Version(tf.__version__) < Version("2.3.0")
else {"learning_rate": lr}
)
model.compile(loss="mean_squared_error", optimizer=SGD(**kwargs))
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def model(data):
return get_model(data)
@pytest.fixture(scope="module")
def model_signature():
return ModelSignature(
inputs=Schema([TensorSpec(np.dtype("float64"), (-1, 4))]),
outputs=Schema([TensorSpec(np.dtype("float32"), (-1, 1))]),
)
def get_tf_keras_model(data):
x, y = data
model = Sequential()
model.add(Dense(3, input_dim=4))
model.add(Dense(1))
model.compile(loss="mean_squared_error", optimizer=SGD(learning_rate=0.001))
model.fit(x, y)
return model
@pytest.fixture(scope="module")
def tf_keras_model(data):
return get_tf_keras_model(data)
@pytest.fixture(scope="module")
def predicted(model, data):
x, _ = data
return model.predict(x)
@pytest.fixture(scope="module")
def custom_layer():
class MyDense(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super().__init__(**kwargs)
def build(self, input_shape):
# pylint: disable=attribute-defined-outside-init
self.kernel = self.add_weight(
name="kernel",
shape=(input_shape[1], self.output_dim),
initializer="uniform",
trainable=True,
)
super().build(input_shape)
def call(self, inputs): # pylint: disable=arguments-differ
return K.dot(inputs, self.kernel)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
def get_config(self):
return {"output_dim": self.output_dim}
return MyDense
@pytest.fixture(scope="module")
def custom_model(data, custom_layer):
x, y = data
model = Sequential()
model.add(Dense(6, input_dim=4))
model.add(custom_layer(1))
model.compile(loss="mean_squared_error", optimizer="SGD")
model.fit(x, y, epochs=1)
return model
@pytest.fixture(scope="module")
def custom_predicted(custom_model, data):
x, _ = data
return custom_model.predict(x)
@pytest.fixture
def model_path(tmp_path):
return os.path.join(tmp_path, "model")
@pytest.fixture
def keras_custom_env(tmp_path):
conda_env = os.path.join(tmp_path, "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["keras", "tensorflow", "pytest"])
return conda_env
@pytest.mark.parametrize(
("build_model", "save_format"),
[
(get_model, None),
(get_tf_keras_model, None),
(get_tf_keras_model, "h5"),
(get_tf_keras_model, "tf"),
],
)
def test_model_save_load(build_model, save_format, model_path, data):
x, _ = data
keras_model = build_model(data)
if build_model == get_tf_keras_model:
model_path = os.path.join(model_path, "tf")
else:
model_path = os.path.join(model_path, "plain")
expected = keras_model.predict(x)
kwargs = {"save_format": save_format} if save_format else {}
mlflow.tensorflow.save_model(keras_model, path=model_path, keras_model_kwargs=kwargs)
# Loading Keras model
model_loaded = mlflow.tensorflow.load_model(model_path)
# When saving as SavedModel, we actually convert the model
# to a slightly different format, so we cannot assume it is
# exactly the same.
if save_format != "tf":
assert type(keras_model) == type(model_loaded)
np.testing.assert_allclose(model_loaded.predict(x), expected, rtol=1e-5)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
np.testing.assert_allclose(pyfunc_loaded.predict(x), expected, rtol=1e-5)
def test_pyfunc_serve_and_score(data):
x, _ = data
model = get_model(data)
with mlflow.start_run():
model_info = mlflow.tensorflow.log_model(model, artifact_path="model")
expected = model.predict(x)
scoring_response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=pd.DataFrame(x),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
actual_scoring_response = (
PredictionsResponse.from_json(scoring_response.content.decode("utf-8"))
.get_predictions()
.values.astype(np.float32)
)
np.testing.assert_allclose(actual_scoring_response, expected, rtol=1e-5)
def test_score_model_as_spark_udf(data):
x, _ = data
model = get_model(data)
with mlflow.start_run():
model_info = mlflow.tensorflow.log_model(model, artifact_path="model")
expected = model.predict(x)
x_df = pd.DataFrame(x, columns=["0", "1", "2", "3"])
spark_udf_preds = score_model_as_udf(
model_uri=model_info.model_uri, pandas_df=x_df, result_type="float"
)
np.testing.assert_allclose(
np.array(spark_udf_preds), expected.reshape(len(spark_udf_preds)), rtol=1e-5
)
def test_signature_and_examples_are_saved_correctly(model, data, model_signature):
signature_ = model_signature
example_ = data[0][:3, :]
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.tensorflow.save_model(
model, path=path, signature=signature, input_example=example
)
mlflow_model = Model.load(path)
if signature is None and example is None:
                    assert mlflow_model.signature is None
else:
assert mlflow_model.signature == signature_
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
np.testing.assert_allclose(_read_example(mlflow_model, path), example)
def test_custom_model_save_load(custom_model, custom_layer, data, custom_predicted, model_path):
x, _ = data
custom_objects = {"MyDense": custom_layer}
mlflow.tensorflow.save_model(custom_model, path=model_path, custom_objects=custom_objects)
# Loading Keras model
model_loaded = mlflow.tensorflow.load_model(model_path)
assert all(model_loaded.predict(x) == custom_predicted)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
assert all(pyfunc_loaded.predict(x) == custom_predicted)
@pytest.mark.allow_infer_pip_requirements_fallback
@pytest.mark.skipif(
Version(tf.__version__) == Version("2.11.1"),
reason="TensorFlow 2.11.1 has a bug with layers specifying output dimensions",
)
def test_custom_model_save_respects_user_custom_objects(custom_model, custom_layer, model_path):
class DifferentCustomLayer:
def __init__(self):
pass
def __call__(self):
pass
incorrect_custom_objects = {"MyDense": DifferentCustomLayer()}
correct_custom_objects = {"MyDense": custom_layer}
mlflow.tensorflow.save_model(
custom_model, path=model_path, custom_objects=incorrect_custom_objects
)
model_loaded = mlflow.tensorflow.load_model(
model_path, keras_model_kwargs={"custom_objects": correct_custom_objects}
)
assert model_loaded is not None
if Version(tf.__version__) <= Version("2.11.0"):
with pytest.raises(TypeError, match=r".+"):
mlflow.tensorflow.load_model(model_path)
else:
# TF dev build following the release of 2.11.0 introduced changes to the recursive
# loading strategy wherein the validation stage of custom objects loaded won't be
# validated eagerly. This prevents a TypeError from being thrown as in the above
# expectation catching validation block. The change in logic now permits loading and
# will not raise an Exception, as validated below.
incorrect_loaded = mlflow.tensorflow.load_model(model_path)
assert incorrect_loaded is not None
def test_model_load_from_remote_uri_succeeds(model, model_path, mock_s3_bucket, data, predicted):
x, _ = data
mlflow.tensorflow.save_model(model, path=model_path)
artifact_root = f"s3://{mock_s3_bucket}"
artifact_path = "model"
artifact_repo = S3ArtifactRepository(artifact_root)
artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
model_uri = artifact_root + "/" + artifact_path
model_loaded = mlflow.tensorflow.load_model(model_uri=model_uri)
assert all(model_loaded.predict(x) == predicted)
def test_model_log(model, data, predicted):
x, _ = data
# should_start_run tests whether or not calling log_model() automatically starts a run.
for should_start_run in [False, True]:
try:
if should_start_run:
mlflow.start_run()
artifact_path = "keras_model"
model_info = mlflow.tensorflow.log_model(model, artifact_path=artifact_path)
model_uri = f"runs:/{mlflow.active_run().info.run_id}/{artifact_path}"
assert model_info.model_uri == model_uri
# Load model
model_loaded = mlflow.tensorflow.load_model(model_uri=model_uri)
assert all(model_loaded.predict(x) == predicted)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_uri=model_uri)
assert all(pyfunc_loaded.predict(x) == predicted)
finally:
mlflow.end_run()
def test_log_model_calls_register_model(model):
artifact_path = "model"
register_model_patch = mock.patch("mlflow.tracking._model_registry.fluent._register_model")
with mlflow.start_run(), register_model_patch:
mlflow.tensorflow.log_model(
model, artifact_path=artifact_path, registered_model_name="AdsModel1"
)
model_uri = f"runs:/{mlflow.active_run().info.run_id}/{artifact_path}"
assert_register_model_called_with_local_model_path(
register_model_mock=mlflow.tracking._model_registry.fluent._register_model,
model_uri=model_uri,
registered_model_name="AdsModel1",
)
def test_log_model_no_registered_model_name(model):
artifact_path = "model"
register_model_patch = mock.patch("mlflow.tracking._model_registry.fluent._register_model")
with mlflow.start_run(), register_model_patch:
mlflow.tensorflow.log_model(model, artifact_path=artifact_path)
mlflow.tracking._model_registry.fluent._register_model.assert_not_called()
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
model, model_path, keras_custom_env
):
mlflow.tensorflow.save_model(model, path=model_path, conda_env=keras_custom_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != keras_custom_env
with open(keras_custom_env) as f:
keras_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path) as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == keras_custom_env_parsed
def test_model_save_accepts_conda_env_as_dict(model, model_path):
conda_env = dict(mlflow.tensorflow.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.tensorflow.save_model(model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path) as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
def test_model_save_persists_requirements_in_mlflow_model_directory(
model, model_path, keras_custom_env
):
mlflow.tensorflow.save_model(model, path=model_path, conda_env=keras_custom_env)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(keras_custom_env, saved_pip_req_path)
def test_log_model_with_pip_requirements(model, tmp_path):
expected_mlflow_version = _mlflow_major_version_string()
# Path to a requirements file
req_file = tmp_path.joinpath("requirements.txt")
req_file.write_text("a")
with mlflow.start_run():
mlflow.tensorflow.log_model(model, artifact_path="model", pip_requirements=str(req_file))
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), [expected_mlflow_version, "a"], strict=True
)
# List of requirements
with mlflow.start_run():
mlflow.tensorflow.log_model(
model,
artifact_path="model",
pip_requirements=[f"-r {req_file}", "b"],
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), [expected_mlflow_version, "a", "b"], strict=True
)
# Constraints file
with mlflow.start_run():
mlflow.tensorflow.log_model(
model,
artifact_path="model",
pip_requirements=[f"-c {req_file}", "b"],
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"),
[expected_mlflow_version, "b", "-c constraints.txt"],
["a"],
strict=True,
)
def test_log_model_with_extra_pip_requirements(model, tmp_path):
expected_mlflow_version = _mlflow_major_version_string()
default_reqs = mlflow.tensorflow.get_default_pip_requirements()
# Path to a requirements file
req_file = tmp_path.joinpath("requirements.txt")
req_file.write_text("a")
with mlflow.start_run():
mlflow.tensorflow.log_model(
model, artifact_path="model", extra_pip_requirements=str(req_file)
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), [expected_mlflow_version, *default_reqs, "a"]
)
# List of requirements
with mlflow.start_run():
mlflow.tensorflow.log_model(
model,
artifact_path="model",
extra_pip_requirements=[f"-r {req_file}", "b"],
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"), [expected_mlflow_version, *default_reqs, "a", "b"]
)
# Constraints file
with mlflow.start_run():
mlflow.tensorflow.log_model(
model,
artifact_path="model",
extra_pip_requirements=[f"-c {req_file}", "b"],
)
_assert_pip_requirements(
mlflow.get_artifact_uri("model"),
[expected_mlflow_version, *default_reqs, "b", "-c constraints.txt"],
["a"],
)
def test_model_log_persists_requirements_in_mlflow_model_directory(model, keras_custom_env):
artifact_path = "model"
with mlflow.start_run():
mlflow.tensorflow.log_model(model, artifact_path=artifact_path, conda_env=keras_custom_env)
model_path = _download_artifact_from_uri(
f"runs:/{mlflow.active_run().info.run_id}/{artifact_path}"
)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(keras_custom_env, saved_pip_req_path)
def test_model_log_persists_specified_conda_env_in_mlflow_model_directory(model, keras_custom_env):
artifact_path = "model"
with mlflow.start_run():
mlflow.tensorflow.log_model(model, artifact_path=artifact_path, conda_env=keras_custom_env)
model_path = _download_artifact_from_uri(
f"runs:/{mlflow.active_run().info.run_id}/{artifact_path}"
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV]["conda"])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != keras_custom_env
with open(keras_custom_env) as f:
keras_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path) as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == keras_custom_env_parsed
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
model, model_path
):
mlflow.tensorflow.save_model(model, path=model_path)
_assert_pip_requirements(model_path, mlflow.tensorflow.get_default_pip_requirements())
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(model):
artifact_path = "model"
with mlflow.start_run():
mlflow.tensorflow.log_model(model, artifact_path=artifact_path)
model_uri = mlflow.get_artifact_uri(artifact_path)
_assert_pip_requirements(model_uri, mlflow.tensorflow.get_default_pip_requirements())
def test_model_load_succeeds_with_missing_data_key_when_data_exists_at_default_path(
tf_keras_model, model_path, data
):
"""
This is a backwards compatibility test to ensure that models saved in MLflow version <= 0.8.0
can be loaded successfully. These models are missing the `data` flavor configuration key.
"""
mlflow.tensorflow.save_model(
tf_keras_model, path=model_path, keras_model_kwargs={"save_format": "h5"}
)
shutil.move(os.path.join(model_path, "data", "model.h5"), os.path.join(model_path, "model.h5"))
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
flavor_conf = model_conf.flavors.get(mlflow.tensorflow.FLAVOR_NAME, None)
assert flavor_conf is not None
del flavor_conf["data"]
model_conf.save(model_conf_path)
model_loaded = mlflow.tensorflow.load_model(model_path)
assert all(model_loaded.predict(data[0]) == tf_keras_model.predict(data[0]))
@pytest.mark.allow_infer_pip_requirements_fallback
def test_save_model_with_tf_save_format(model_path):
"""Ensures that Keras models can be saved with SavedModel format.
Using SavedModel format (save_format="tf") requires that the file extension
is _not_ "h5".
"""
keras_model = mock.Mock(spec=tf.keras.Model)
mlflow.tensorflow.save_model(
keras_model, path=model_path, keras_model_kwargs={"save_format": "tf"}
)
_, args, kwargs = keras_model.save.mock_calls[0]
# Ensure that save_format propagated through
assert kwargs["save_format"] == "tf"
# Ensure that the saved model does not have h5 extension
assert not args[0].endswith(".h5")
def test_save_and_load_model_with_tf_save_format(tf_keras_model, model_path, data):
"""Ensures that keras models saved with save_format="tf" can be loaded."""
mlflow.tensorflow.save_model(
tf_keras_model, path=model_path, keras_model_kwargs={"save_format": "tf"}
)
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
flavor_conf = model_conf.flavors.get(mlflow.tensorflow.FLAVOR_NAME, None)
assert flavor_conf is not None
assert flavor_conf.get("save_format") == "tf"
assert not os.path.exists(
os.path.join(model_path, "data", "model.h5")
), "TF model was saved with HDF5 format; expected SavedModel"
assert os.path.isdir(
os.path.join(model_path, "data", "model")
), "Expected directory containing saved_model.pb"
model_loaded = mlflow.tensorflow.load_model(model_path)
np.testing.assert_allclose(model_loaded.predict(data[0]), tf_keras_model.predict(data[0]))
def test_load_without_save_format(tf_keras_model, model_path, data):
"""Ensures that keras models without save_format can still be loaded."""
mlflow.tensorflow.save_model(
tf_keras_model, path=model_path, keras_model_kwargs={"save_format": "h5"}
)
model_conf_path = os.path.join(model_path, "MLmodel")
model_conf = Model.load(model_conf_path)
flavor_conf = model_conf.flavors.get(mlflow.tensorflow.FLAVOR_NAME)
assert flavor_conf is not None
del flavor_conf["save_format"]
model_conf.save(model_conf_path)
model_loaded = mlflow.tensorflow.load_model(model_path)
np.testing.assert_allclose(model_loaded.predict(data[0]), tf_keras_model.predict(data[0]))
# TODO: Remove skipif condition `not Version(tf.__version__).is_devrelease` once
# https://github.com/huggingface/transformers/issues/22421 is resolved.
@pytest.mark.skipif(
not (
_is_importable("transformers")
and Version(tf.__version__) >= Version("2.6.0")
and not Version(tf.__version__).is_devrelease
),
reason="This test requires transformers, which is no longer compatible with Keras < 2.6.0, "
"and transformers is not compatible with Tensorflow dev version, see "
"https://github.com/huggingface/transformers/issues/22421",
)
def test_pyfunc_serve_and_score_transformers():
from transformers import BertConfig, TFBertModel # pylint: disable=import-error
bert_model = TFBertModel(
BertConfig(
vocab_size=16,
hidden_size=2,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=2,
)
)
dummy_inputs = bert_model.dummy_inputs["input_ids"].numpy()
input_ids = tf.keras.layers.Input(shape=(dummy_inputs.shape[1],), dtype=tf.int32)
model = tf.keras.Model(
inputs=[input_ids], outputs=[bert_model.bert(input_ids).last_hidden_state]
)
model.compile()
with mlflow.start_run():
mlflow.tensorflow.log_model(
model,
artifact_path="model",
extra_pip_requirements=extra_pip_requirements,
)
model_uri = mlflow.get_artifact_uri("model")
data = json.dumps({"inputs": dummy_inputs.tolist()})
resp = pyfunc_serve_and_score_model(
model_uri,
data,
pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
scores = PredictionsResponse.from_json(resp.content.decode("utf-8")).get_predictions(
predictions_format="ndarray"
)
assert_array_almost_equal(scores, model.predict(dummy_inputs))
def test_log_model_with_code_paths(model):
artifact_path = "model"
with mlflow.start_run(), mock.patch(
"mlflow.tensorflow._add_code_from_conf_to_system_path"
) as add_mock:
mlflow.tensorflow.log_model(model, artifact_path=artifact_path, code_paths=[__file__])
model_uri = mlflow.get_artifact_uri(artifact_path)
_compare_logged_code_paths(__file__, model_uri, mlflow.tensorflow.FLAVOR_NAME)
mlflow.tensorflow.load_model(model_uri)
add_mock.assert_called()
def test_virtualenv_subfield_points_to_correct_path(model, model_path):
mlflow.tensorflow.save_model(model, path=model_path)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
python_env_path = Path(model_path, pyfunc_conf[pyfunc.ENV]["virtualenv"])
assert python_env_path.exists()
assert python_env_path.is_file()
def save_or_log_keras_model_by_mlflow128(tmp_path, task_type, save_as_type, save_path=None):
tf_tests_dir = os.path.dirname(__file__)
conda_env = get_or_create_conda_env(os.path.join(tf_tests_dir, "mlflow-128-tf-23-env.yaml"))
output_data_file_path = os.path.join(tmp_path, "output_data.pkl")
tracking_uri = mlflow.get_tracking_uri()
exec_py_path = os.path.join(tf_tests_dir, "save_keras_model.py")
conda_env.execute(
f"python {exec_py_path} "
f"--tracking_uri {tracking_uri} "
f"--task_type {task_type} "
f"--save_as_type {save_as_type} "
f"--save_path {save_path if save_path else 'none'}",
)
with open(output_data_file_path, "rb") as f:
inference_df, expected_results_df, run_id = pickle.load(f)
return ModelDataInfo(
inference_df=inference_df,
expected_results_df=expected_results_df,
raw_results=None,
raw_df=None,
run_id=run_id,
)
def test_load_and_predict_keras_model_saved_by_mlflow128(tmp_path, monkeypatch):
mlflow.set_tracking_uri(tmp_path.joinpath("mlruns").as_uri())
monkeypatch.chdir(tmp_path)
model_data_info = save_or_log_keras_model_by_mlflow128(
tmp_path, task_type="log_model", save_as_type="keras"
)
model_uri = f"runs:/{model_data_info.run_id}/model"
def load_and_predict(load_model_fn):
mlflow_model = load_model_fn()
predictions = mlflow_model.predict(model_data_info.inference_df)
np.testing.assert_allclose(predictions, model_data_info.expected_results_df)
load_and_predict(lambda: mlflow.pyfunc.load_model(model_uri))
load_and_predict(lambda: mlflow.tensorflow.load_model(model_uri))
def test_load_tf_keras_model_with_options(tf_keras_model, model_path):
mlflow.tensorflow.save_model(tf_keras_model, path=model_path)
keras_model_kwargs = {
"compile": False,
"options": tf.saved_model.LoadOptions(),
}
with mock.patch("mlflow.tensorflow._load_keras_model") as mock_load:
mlflow.tensorflow.load_model(model_path, keras_model_kwargs=keras_model_kwargs)
mock_load.assert_called_once_with(
model_path=mock.ANY, keras_module=mock.ANY, save_format=mock.ANY, **keras_model_kwargs
)
def test_tf_saved_model_model_with_tf_keras_api(tmp_path, monkeypatch):
mlflow.set_tracking_uri(tmp_path.joinpath("mlruns").as_uri())
monkeypatch.chdir(tmp_path)
model_data_info = save_or_log_keras_model_by_mlflow128(
tmp_path, task_type="log_model", save_as_type="tf1-estimator"
)
model_uri = f"runs:/{model_data_info.run_id}/model"
mlflow_model = mlflow.pyfunc.load_model(model_uri)
predictions = mlflow_model.predict({"features": model_data_info.inference_df})
np.testing.assert_allclose(predictions["dense"], model_data_info.expected_results_df)
def test_model_save_load_with_metadata(tf_keras_model, model_path):
mlflow.tensorflow.save_model(
tf_keras_model, path=model_path, metadata={"metadata_key": "metadata_value"}
)
reloaded_model = mlflow.pyfunc.load_model(model_uri=model_path)
assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value"
def test_model_log_with_metadata(tf_keras_model):
artifact_path = "model"
with mlflow.start_run():
mlflow.tensorflow.log_model(
tf_keras_model, artifact_path=artifact_path, metadata={"metadata_key": "metadata_value"}
)
model_uri = mlflow.get_artifact_uri(artifact_path)
reloaded_model = mlflow.pyfunc.load_model(model_uri=model_uri)
assert reloaded_model.metadata.metadata["metadata_key"] == "metadata_value"
def test_model_log_with_signature_inference(tf_keras_model, data, model_signature):
artifact_path = "model"
example = data[0][:3, :]
with mlflow.start_run():
mlflow.tensorflow.log_model(
tf_keras_model, artifact_path=artifact_path, input_example=example
)
model_uri = mlflow.get_artifact_uri(artifact_path)
mlflow_model = Model.load(model_uri)
assert mlflow_model.signature == model_signature
|
fb5bf25297c14677d70982413480aa3de9863f96
|
65ccaf1937bb59bc13422f214267bcaac6a0c628
|
/aws-inventory/lambda/inventory-client-vpn.py
|
a76960a76610a44d5f8f1401e7d3b73fb4660c56
|
[
"Apache-2.0"
] |
permissive
|
turnerlabs/antiope
|
76b803872f4efa9e465288231564b30cf984304e
|
ca141dbc467168902b96250e3bb054ced24b4ca8
|
refs/heads/development
| 2023-07-25T03:10:33.603230
| 2021-03-24T13:24:29
| 2021-03-24T13:24:29
| 150,344,256
| 227
| 46
|
Apache-2.0
| 2022-01-06T14:32:02
| 2018-09-26T00:12:46
|
Python
|
UTF-8
|
Python
| false
| false
| 6,185
|
py
|
inventory-client-vpn.py
|
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
from datetime import datetime, timezone
from dateutil import tz
from antiope.aws_account import *
from common import *
import logging
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
RESOURCE_PATH = "ec2/clientvpn"
def lambda_handler(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=True))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Received message: " + json.dumps(message, sort_keys=True))
try:
target_account = AWSAccount(message['account_id'])
for r in target_account.get_regions():
try:
discover_client_vpn_endpoints(target_account, r)
except ClientError as e:
# Move onto next region if we get access denied. This is probably SCPs
if e.response['Error']['Code'] == 'AccessDeniedException':
logger.error(f"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})")
continue
elif e.response['Error']['Code'] == 'UnauthorizedOperation':
logger.error(f"UnauthorizedOperation for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})")
continue
else:
raise # pass on to the next handler
except AntiopeAssumeRoleError as e:
logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
return()
except ClientError as e:
if e.response['Error']['Code'] == 'UnauthorizedOperation':
logger.error("Antiope doesn't have proper permissions to this account")
return(event)
logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
raise
except Exception as e:
logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
raise
def discover_client_vpn_endpoints(target_account, region):
    '''Iterate across all regions to discover client vpn endpoints'''
ec2_client = target_account.get_client('ec2', region=region)
response = ec2_client.describe_client_vpn_endpoints()
if response['ClientVpnEndpoints']:
for cvpn in response['ClientVpnEndpoints']:
resource_item = {}
resource_item['awsAccountId'] = target_account.account_id
resource_item['awsAccountName'] = target_account.account_name
resource_item['resourceType'] = "AWS::EC2::ClientVpnEndpoint"
resource_item['source'] = "Antiope"
resource_item['awsRegion'] = region
resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
resource_item['configuration'] = cvpn
resource_item['supplementaryConfiguration'] = {}
resource_item['resourceId'] = cvpn['ClientVpnEndpointId']
resource_item['resourceCreationTime'] = cvpn['CreationTime']
resource_item['errors'] = {}
if 'Tags' in cvpn:
resource_item['tags'] = parse_tags(cvpn['Tags'])
# Get any active VPN connections to the endpoint and add as part of the supplementary configuration.
connections = discover_client_vpn_connections(ec2_client, cvpn['ClientVpnEndpointId'])
resource_item['supplementaryConfiguration']['Connections'] = connections
# Obtain other network configuration associated with the VPN endpoint and add as part of the supplementary configuration.
routes = discover_client_vpn_routes(ec2_client, cvpn['ClientVpnEndpointId'])
resource_item['supplementaryConfiguration']['Routes'] = routes
targets = discover_client_vpn_targets(ec2_client, cvpn['ClientVpnEndpointId'])
resource_item['supplementaryConfiguration']['ClientVpnTargetNetworks'] = targets
# Save files to S3
save_resource_to_s3(RESOURCE_PATH, cvpn['ClientVpnEndpointId'], resource_item)
logger.info("Discovered Client VPN connection ({}) in account {} for region {}".format(cvpn['ClientVpnEndpointId'], target_account.account_id, region))
logger.debug("Data: {}".format(resource_item))
else:
logger.debug("No Client VPN connections found for account {} in region {}".format(target_account.account_id, region))
def discover_client_vpn_connections(ec2_client, vpnId):
'''Get client VPN endpoint configuration based on the endpointId'''
response = ec2_client.describe_client_vpn_connections(
ClientVpnEndpointId=vpnId,
)
return(response['Connections'])
def discover_client_vpn_routes(ec2_client, vpnId):
'''Get client VPN routes configuration based on the endpointId'''
response = ec2_client.describe_client_vpn_routes(
ClientVpnEndpointId=vpnId,
)
return(response['Routes'])
def discover_client_vpn_targets(ec2_client, vpnId):
'''Get client VPN target networks configuration based on the endpointId'''
response = ec2_client.describe_client_vpn_target_networks(
ClientVpnEndpointId=vpnId,
)
return(response['ClientVpnTargetNetworks'])
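# Minimal SNS-style test event (sketch; the account id below is illustrative) that
# matches what lambda_handler() above expects to unpack:
#   >>> import json
#   >>> event = {"Records": [{"Sns": {"Message": json.dumps({"account_id": "123456789012"})}}]}
#   >>> json.loads(event["Records"][0]["Sns"]["Message"])["account_id"]
#   '123456789012'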
|
485a0aed0cab9ef98b8161c6465eeed7929c2c37
|
ee87c715e5d937b0380ddb87d56e9ebc4877a02b
|
/sklearn/cluster/tests/test_affinity_propagation.py
|
9f82957d2067aa91ccfa843979b71bd47a5f8bfc
|
[
"BSD-3-Clause"
] |
permissive
|
scikit-learn/scikit-learn
|
27a2196f3173e0f32f7a5c5d652b70a6c57c7644
|
061f8777b48e5491b0c57bb8e0bc7067c103079d
|
refs/heads/main
| 2023-08-18T15:32:59.764468
| 2023-08-18T14:39:08
| 2023-08-18T14:39:08
| 843,222
| 58,456
| 29,777
|
BSD-3-Clause
| 2023-09-14T19:08:34
| 2010-08-17T09:43:38
|
Python
|
UTF-8
|
Python
| false
| false
| 11,533
|
py
|
test_affinity_propagation.py
|
"""
Testing for Clustering methods
"""
import warnings
import numpy as np
import pytest
from sklearn.cluster import AffinityPropagation, affinity_propagation
from sklearn.cluster._affinity_propagation import _equal_similarities_and_preferences
from sklearn.datasets import make_blobs
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.metrics import euclidean_distances
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import CSR_CONTAINERS
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(
n_samples=60,
n_features=2,
centers=centers,
cluster_std=0.4,
shuffle=True,
random_state=0,
)
# TODO: AffinityPropagation must preserve dtype for its fitted attributes
# and test must be created accordingly to this new behavior.
# For more details, see: https://github.com/scikit-learn/scikit-learn/issues/11000
def test_affinity_propagation(global_random_seed, global_dtype):
"""Test consistency of the affinity propagations."""
S = -euclidean_distances(X.astype(global_dtype, copy=False), squared=True)
preference = np.median(S) * 10
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference, random_state=global_random_seed
)
n_clusters_ = len(cluster_centers_indices)
assert n_clusters == n_clusters_
def test_affinity_propagation_precomputed():
"""Check equality of precomputed affinity matrix to internally computed affinity
matrix.
"""
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
af = AffinityPropagation(
preference=preference, affinity="precomputed", random_state=28
)
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True, random_state=37)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert np.unique(labels).size == n_clusters_
assert n_clusters == n_clusters_
def test_affinity_propagation_no_copy():
"""Check behaviour of not copying the input data."""
S = -euclidean_distances(X, squared=True)
S_original = S.copy()
preference = np.median(S) * 10
assert not np.allclose(S.diagonal(), preference)
# with copy=True S should not be modified
affinity_propagation(S, preference=preference, copy=True, random_state=0)
assert_allclose(S, S_original)
assert not np.allclose(S.diagonal(), preference)
assert_allclose(S.diagonal(), np.zeros(S.shape[0]))
# with copy=False S will be modified inplace
affinity_propagation(S, preference=preference, copy=False, random_state=0)
assert_allclose(S.diagonal(), preference)
# test that copy=True and copy=False lead to the same result
S = S_original.copy()
af = AffinityPropagation(preference=preference, verbose=True, random_state=0)
labels = af.fit(X).labels_
_, labels_no_copy = affinity_propagation(
S, preference=preference, copy=False, random_state=74
)
assert_array_equal(labels, labels_no_copy)
def test_affinity_propagation_affinity_shape():
"""Check the shape of the affinity matrix when using `affinity_propagation."""
S = -euclidean_distances(X, squared=True)
err_msg = "The matrix of similarities must be a square array"
with pytest.raises(ValueError, match=err_msg):
affinity_propagation(S[:, :-1])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_affinity_propagation_precomputed_with_sparse_input(csr_container):
err_msg = "A sparse matrix was passed, but dense data is required"
with pytest.raises(TypeError, match=err_msg):
AffinityPropagation(affinity="precomputed").fit(csr_container((3, 3)))
def test_affinity_propagation_predict(global_random_seed, global_dtype):
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean", random_state=global_random_seed)
X_ = X.astype(global_dtype, copy=False)
labels = af.fit_predict(X_)
labels2 = af.predict(X_)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
with pytest.raises(NotFittedError):
af.predict(X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed", random_state=57)
af.fit(S)
with pytest.raises(ValueError, match="expecting 60 features as input"):
af.predict(X)
def test_affinity_propagation_fit_non_convergence(global_dtype):
# In case of non-convergence of affinity_propagation(), the cluster
# centers should be an empty array and training samples should be labelled
# as noise (-1)
X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype)
# Force non-convergence by allowing only a single iteration
af = AffinityPropagation(preference=-10, max_iter=1, random_state=82)
with pytest.warns(ConvergenceWarning):
af.fit(X)
assert_allclose(np.empty((0, 2)), af.cluster_centers_)
assert_array_equal(np.array([-1, -1, -1]), af.labels_)
def test_affinity_propagation_equal_mutual_similarities(global_dtype):
X = np.array([[-1, 1], [1, -1]], dtype=global_dtype)
S = -euclidean_distances(X, squared=True)
# setting preference > similarity
with pytest.warns(UserWarning, match="mutually equal"):
cluster_center_indices, labels = affinity_propagation(S, preference=0)
# expect every sample to become an exemplar
assert_array_equal([0, 1], cluster_center_indices)
assert_array_equal([0, 1], labels)
# setting preference < similarity
with pytest.warns(UserWarning, match="mutually equal"):
cluster_center_indices, labels = affinity_propagation(S, preference=-10)
# expect one cluster, with arbitrary (first) sample as exemplar
assert_array_equal([0], cluster_center_indices)
assert_array_equal([0, 0], labels)
# setting different preferences
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
cluster_center_indices, labels = affinity_propagation(
S, preference=[-20, -10], random_state=37
)
# expect one cluster, with highest-preference sample as exemplar
assert_array_equal([1], cluster_center_indices)
assert_array_equal([0, 0], labels)
def test_affinity_propagation_predict_non_convergence(global_dtype):
# In case of non-convergence of affinity_propagation(), the cluster
# centers should be an empty array
X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype)
# Force non-convergence by allowing only a single iteration
with pytest.warns(ConvergenceWarning):
af = AffinityPropagation(preference=-10, max_iter=1, random_state=75).fit(X)
# At prediction time, consider new samples as noise since there are no
# clusters
to_predict = np.array([[2, 2], [3, 3], [4, 4]])
with pytest.warns(ConvergenceWarning):
y = af.predict(to_predict)
assert_array_equal(np.array([-1, -1, -1]), y)
def test_affinity_propagation_non_convergence_regressiontest(global_dtype):
X = np.array(
[[1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 1]], dtype=global_dtype
)
af = AffinityPropagation(affinity="euclidean", max_iter=2, random_state=34)
msg = (
"Affinity propagation did not converge, this model may return degenerate"
" cluster centers and labels."
)
with pytest.warns(ConvergenceWarning, match=msg):
af.fit(X)
assert_array_equal(np.array([0, 0, 0]), af.labels_)
def test_equal_similarities_and_preferences(global_dtype):
# Unequal distances
X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype)
S = -euclidean_distances(X, squared=True)
assert not _equal_similarities_and_preferences(S, np.array(0))
assert not _equal_similarities_and_preferences(S, np.array([0, 0]))
assert not _equal_similarities_and_preferences(S, np.array([0, 1]))
# Equal distances
X = np.array([[0, 0], [1, 1]], dtype=global_dtype)
S = -euclidean_distances(X, squared=True)
# Different preferences
assert not _equal_similarities_and_preferences(S, np.array([0, 1]))
# Same preferences
assert _equal_similarities_and_preferences(S, np.array([0, 0]))
assert _equal_similarities_and_preferences(S, np.array(0))
def test_affinity_propagation_random_state():
"""Check that different random states lead to different initialisations
by looking at the center locations after two iterations.
"""
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(
n_samples=300, centers=centers, cluster_std=0.5, random_state=0
)
# random_state = 0
ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=0)
ap.fit(X)
centers0 = ap.cluster_centers_
# random_state = 76
ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=76)
ap.fit(X)
centers76 = ap.cluster_centers_
# check that the centers have not yet converged to the same solution
assert np.mean((centers0 - centers76) ** 2) > 1
@pytest.mark.parametrize("container", CSR_CONTAINERS + [np.array])
def test_affinity_propagation_convergence_warning_dense_sparse(container, global_dtype):
"""
Check that having sparse or dense `centers` format should not
influence the convergence.
Non-regression test for gh-13334.
"""
centers = container(np.zeros((1, 10)))
rng = np.random.RandomState(42)
X = rng.rand(40, 10).astype(global_dtype, copy=False)
y = (4 * rng.rand(40)).astype(int)
ap = AffinityPropagation(random_state=46)
ap.fit(X, y)
ap.cluster_centers_ = centers
with warnings.catch_warnings():
warnings.simplefilter("error", ConvergenceWarning)
assert_array_equal(ap.predict(X), np.zeros(X.shape[0], dtype=int))
# FIXME: this test is broken with different random states, needs to be revisited
def test_correct_clusters(global_dtype):
# Test to fix incorrect clusters due to dtype change
# (non-regression test for issue #10832)
X = np.array(
[[1, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 1]], dtype=global_dtype
)
afp = AffinityPropagation(preference=1, affinity="precomputed", random_state=0).fit(
X
)
expected = np.array([0, 1, 1, 2])
assert_array_equal(afp.labels_, expected)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_input_for_predict(csr_container):
# Test to make sure sparse inputs are accepted for predict
# (non-regression test for issue #20049)
af = AffinityPropagation(affinity="euclidean", random_state=42)
af.fit(X)
labels = af.predict(csr_container((2, 2)))
assert_array_equal(labels, (2, 2))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_input_for_fit_predict(csr_container):
# Test to make sure sparse inputs are accepted for fit_predict
# (non-regression test for issue #20049)
af = AffinityPropagation(affinity="euclidean", random_state=42)
rng = np.random.RandomState(42)
X = csr_container(rng.randint(0, 2, size=(5, 5)))
labels = af.fit_predict(X)
assert_array_equal(labels, (0, 1, 1, 2, 3))
|
bedf698e1611076fb30a2dc95f338472da529d09
|
e457ef64e939acc769d3b4609184f1603fdd875a
|
/fastavro/_logical_writers_py.py
|
dcfedc4d6941cfeb874e043c873d21b44fe5a7d7
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
fastavro/fastavro
|
dbad8c55fabc9f22b16273ee1a926f22c840c694
|
40dfd526076446cc7f7eef97e40da216b910d047
|
refs/heads/master
| 2023-09-01T04:16:13.510802
| 2023-08-25T10:19:13
| 2023-08-25T11:05:36
| 3,845,895
| 430
| 105
|
MIT
| 2023-09-14T20:14:34
| 2012-03-27T16:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 7,830
|
py
|
_logical_writers_py.py
|
# cython: auto_cpdef=True
import datetime
import decimal
from io import BytesIO
import os
import time
from typing import Dict, Union
import uuid
from .const import (
MCS_PER_HOUR,
MCS_PER_MINUTE,
MCS_PER_SECOND,
MLS_PER_HOUR,
MLS_PER_MINUTE,
MLS_PER_SECOND,
DAYS_SHIFT,
)
is_windows = os.name == "nt"
epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
epoch_naive = datetime.datetime(1970, 1, 1)
def prepare_timestamp_millis(data, schema):
"""Converts datetime.datetime object to int timestamp with milliseconds"""
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = data - epoch
return (delta.days * 24 * 3600 + delta.seconds) * MLS_PER_SECOND + int(
delta.microseconds / 1000
)
# On Windows, mktime does not support pre-epoch, see e.g.
# https://stackoverflow.com/questions/2518706/python-mktime-overflow-error
if is_windows:
delta = data - epoch_naive
return (delta.days * 24 * 3600 + delta.seconds) * MLS_PER_SECOND + int(
delta.microseconds / 1000
)
else:
return int(time.mktime(data.timetuple())) * MLS_PER_SECOND + int(
data.microsecond / 1000
)
else:
return data
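# Illustrative sketch of the conversion above (not part of the original module):
# assuming MLS_PER_SECOND == 1000, a timezone-aware datetime maps to epoch
# milliseconds, e.g.
#   >>> import datetime
#   >>> aware = datetime.datetime(1970, 1, 1, 0, 0, 1, 500000, tzinfo=datetime.timezone.utc)
#   >>> prepare_timestamp_millis(aware, {})
#   1500
# Naive datetimes go through time.mktime (or the epoch_naive delta on Windows),
# so their result depends on the local timezone.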
def prepare_local_timestamp_millis(
data: Union[datetime.datetime, int], schema: Dict
) -> int:
"""Converts datetime.datetime object to int timestamp with milliseconds.
The local-timestamp-millis logical type represents a timestamp in a local
timezone, regardless of what specific time zone is considered local, with a
precision of one millisecond.
"""
if isinstance(data, datetime.datetime):
delta = data.replace(tzinfo=datetime.timezone.utc) - epoch
return (delta.days * 24 * 3600 + delta.seconds) * MLS_PER_SECOND + int(
delta.microseconds / 1000
)
else:
return data
def prepare_timestamp_micros(data, schema):
"""Converts datetime.datetime to int timestamp with microseconds"""
if isinstance(data, datetime.datetime):
if data.tzinfo is not None:
delta = data - epoch
return (
delta.days * 24 * 3600 + delta.seconds
) * MCS_PER_SECOND + delta.microseconds
# On Windows, mktime does not support pre-epoch, see e.g.
# https://stackoverflow.com/questions/2518706/python-mktime-overflow-error
if is_windows:
delta = data - epoch_naive
return (
delta.days * 24 * 3600 + delta.seconds
) * MCS_PER_SECOND + delta.microseconds
else:
return (
int(time.mktime(data.timetuple())) * MCS_PER_SECOND + data.microsecond
)
else:
return data
def prepare_local_timestamp_micros(
data: Union[datetime.datetime, int], schema: Dict
) -> int:
"""Converts datetime.datetime to int timestamp with microseconds
The local-timestamp-micros logical type represents a timestamp in a local
timezone, regardless of what specific time zone is considered local, with a
precision of one microsecond.
"""
if isinstance(data, datetime.datetime):
delta = data.replace(tzinfo=datetime.timezone.utc) - epoch
return (
delta.days * 24 * 3600 + delta.seconds
) * MCS_PER_SECOND + delta.microseconds
else:
return data
def prepare_date(data, schema):
"""Converts datetime.date to int timestamp"""
if isinstance(data, datetime.date):
return data.toordinal() - DAYS_SHIFT
elif isinstance(data, str):
return datetime.date.fromisoformat(data).toordinal() - DAYS_SHIFT
else:
return data
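# Worked example for the date conversion above (added for illustration; assumes
# DAYS_SHIFT is the proleptic ordinal of 1970-01-01, as the epoch-days semantics
# of the int-date logical type implies):
#   >>> import datetime
#   >>> prepare_date(datetime.date(1970, 1, 2), {})
#   1
#   >>> prepare_date("1970-01-11", {})
#   10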
def prepare_bytes_decimal(data, schema):
"""Convert decimal.Decimal to bytes"""
if not isinstance(data, decimal.Decimal):
return data
scale = schema.get("scale", 0)
precision = schema["precision"]
sign, digits, exp = data.as_tuple()
if len(digits) > precision:
raise ValueError("The decimal precision is bigger than allowed by schema")
delta = exp + scale
if delta < 0:
raise ValueError("Scale provided in schema does not match the decimal")
unscaled_datum = 0
for digit in digits:
unscaled_datum = (unscaled_datum * 10) + digit
unscaled_datum = 10**delta * unscaled_datum
bytes_req = (unscaled_datum.bit_length() + 8) // 8
if sign:
unscaled_datum = -unscaled_datum
return unscaled_datum.to_bytes(bytes_req, byteorder="big", signed=True)
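# Worked example for the bytes-decimal encoding above (illustrative, not from the
# original source): the unscaled integer is written as a big-endian two's-complement
# byte string, e.g. with schema {"precision": 4, "scale": 2}:
#   >>> import decimal
#   >>> prepare_bytes_decimal(decimal.Decimal("1.23"), {"precision": 4, "scale": 2})
#   b'{'
#   >>> prepare_bytes_decimal(decimal.Decimal("-1.23"), {"precision": 4, "scale": 2})
#   b'\x85'
# (b'{' is the single byte 0x7b == 123, the unscaled value of 1.23 at scale 2.)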
def prepare_fixed_decimal(data, schema):
"""Converts decimal.Decimal to fixed length bytes array"""
if not isinstance(data, decimal.Decimal):
return data
scale = schema.get("scale", 0)
size = schema["size"]
precision = schema["precision"]
# based on https://github.com/apache/avro/pull/82/
sign, digits, exp = data.as_tuple()
if len(digits) > precision:
raise ValueError("The decimal precision is bigger than allowed by schema")
if -exp > scale:
raise ValueError("Scale provided in schema does not match the decimal")
delta = exp + scale
if delta > 0:
digits = digits + (0,) * delta
unscaled_datum = 0
for digit in digits:
unscaled_datum = (unscaled_datum * 10) + digit
bits_req = unscaled_datum.bit_length() + 1
size_in_bits = size * 8
offset_bits = size_in_bits - bits_req
mask = 2**size_in_bits - 1
bit = 1
for i in range(bits_req):
mask ^= bit
bit <<= 1
if bits_req < 8:
bytes_req = 1
else:
bytes_req = bits_req // 8
if bits_req % 8 != 0:
bytes_req += 1
tmp = BytesIO()
if sign:
unscaled_datum = (1 << bits_req) - unscaled_datum
unscaled_datum = mask | unscaled_datum
for index in range(size - 1, -1, -1):
bits_to_write = unscaled_datum >> (8 * index)
tmp.write(bytes([bits_to_write & 0xFF]))
else:
for i in range(offset_bits // 8):
tmp.write(bytes([0]))
for index in range(bytes_req - 1, -1, -1):
bits_to_write = unscaled_datum >> (8 * index)
tmp.write(bytes([bits_to_write & 0xFF]))
return tmp.getvalue()
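# Worked example for the fixed-decimal encoding above (illustrative): the value is
# sign-extended to exactly `size` bytes, big-endian two's complement, e.g. with
# schema {"precision": 4, "scale": 2, "size": 2}:
#   >>> import decimal
#   >>> prepare_fixed_decimal(decimal.Decimal("1.23"), {"precision": 4, "scale": 2, "size": 2})
#   b'\x00{'
#   >>> prepare_fixed_decimal(decimal.Decimal("-1.23"), {"precision": 4, "scale": 2, "size": 2})
#   b'\xff\x85'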
def prepare_uuid(data, schema):
"""Converts uuid.UUID to
string formatted UUID xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
"""
if isinstance(data, uuid.UUID):
return str(data)
else:
return data
def prepare_time_millis(data, schema):
"""Convert datetime.time to int timestamp with milliseconds"""
if isinstance(data, datetime.time):
return int(
data.hour * MLS_PER_HOUR
+ data.minute * MLS_PER_MINUTE
+ data.second * MLS_PER_SECOND
+ int(data.microsecond / 1000)
)
else:
return data
def prepare_time_micros(data, schema):
"""Convert datetime.time to int timestamp with microseconds"""
if isinstance(data, datetime.time):
return int(
data.hour * MCS_PER_HOUR
+ data.minute * MCS_PER_MINUTE
+ data.second * MCS_PER_SECOND
+ data.microsecond
)
else:
return data
LOGICAL_WRITERS = {
"long-timestamp-millis": prepare_timestamp_millis,
"long-local-timestamp-millis": prepare_local_timestamp_millis,
"long-timestamp-micros": prepare_timestamp_micros,
"long-local-timestamp-micros": prepare_local_timestamp_micros,
"int-date": prepare_date,
"bytes-decimal": prepare_bytes_decimal,
"fixed-decimal": prepare_fixed_decimal,
"string-uuid": prepare_uuid,
"int-time-millis": prepare_time_millis,
"long-time-micros": prepare_time_micros,
}
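# The keys above appear to follow a "<avro type>-<logicalType>" naming scheme
# (inferred from the entries themselves), so the writer for a schema such as
# {"type": "int", "logicalType": "date"} would be looked up roughly like:
#   >>> LOGICAL_WRITERS["int-date"] is prepare_date
#   True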
|
bce42559e9d308ba98a354d9d55b66a07443c62d
|
6edc13730dca64071d4c10df093792c3726d9ced
|
/benchmarks/producer_performance.py
|
c0de6fd239d9f5d03b8358c16e508fab5cae75a0
|
[
"Apache-2.0"
] |
permissive
|
dpkp/kafka-python
|
2ce9f38d716430663a8b069a9ddd85b3a163b3a3
|
a33fcf4d22bdf34e9660e394a7a6f84225411325
|
refs/heads/master
| 2023-08-24T21:15:11.282283
| 2023-08-09T16:44:53
| 2023-08-09T16:44:53
| 5,934,517
| 4,979
| 1,251
|
Apache-2.0
| 2023-09-04T18:34:29
| 2012-09-24T13:00:26
|
Python
|
UTF-8
|
Python
| false
| false
| 5,247
|
py
|
producer_performance.py
|
#!/usr/bin/env python
# Adapted from https://github.com/mrafayaleem/kafka-jython
from __future__ import absolute_import, print_function
import argparse
import pprint
import sys
import threading
import traceback
from kafka.vendor.six.moves import range
from kafka import KafkaProducer
from test.fixtures import KafkaFixture, ZookeeperFixture
def start_brokers(n):
print('Starting {0} {1}-node cluster...'.format(KafkaFixture.kafka_version, n))
print('-> 1 Zookeeper')
zk = ZookeeperFixture.instance()
print('---> {0}:{1}'.format(zk.host, zk.port))
print()
partitions = min(n, 3)
replicas = min(n, 3)
print('-> {0} Brokers [{1} partitions / {2} replicas]'.format(n, partitions, replicas))
brokers = [
KafkaFixture.instance(i, zk, zk_chroot='',
partitions=partitions, replicas=replicas)
for i in range(n)
]
for broker in brokers:
print('---> {0}:{1}'.format(broker.host, broker.port))
print()
return brokers
class ProducerPerformance(object):
@staticmethod
def run(args):
try:
props = {}
for prop in args.producer_config:
k, v = prop.split('=')
try:
v = int(v)
except ValueError:
pass
if v == 'None':
v = None
props[k] = v
if args.brokers:
brokers = start_brokers(args.brokers)
props['bootstrap_servers'] = ['{0}:{1}'.format(broker.host, broker.port)
for broker in brokers]
print("---> bootstrap_servers={0}".format(props['bootstrap_servers']))
print()
print('-> OK!')
print()
print('Initializing producer...')
record = bytes(bytearray(args.record_size))
props['metrics_sample_window_ms'] = args.stats_interval * 1000
producer = KafkaProducer(**props)
for k, v in props.items():
print('---> {0}={1}'.format(k, v))
print('---> send {0} byte records'.format(args.record_size))
print('---> report stats every {0} secs'.format(args.stats_interval))
print('---> raw metrics? {0}'.format(args.raw_metrics))
timer_stop = threading.Event()
timer = StatsReporter(args.stats_interval, producer,
event=timer_stop,
raw_metrics=args.raw_metrics)
timer.start()
print('-> OK!')
print()
for i in range(args.num_records):
producer.send(topic=args.topic, value=record)
producer.flush()
timer_stop.set()
except Exception:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
sys.exit(1)
class StatsReporter(threading.Thread):
def __init__(self, interval, producer, event=None, raw_metrics=False):
super(StatsReporter, self).__init__()
self.interval = interval
self.producer = producer
self.event = event
self.raw_metrics = raw_metrics
def print_stats(self):
metrics = self.producer.metrics()
if self.raw_metrics:
pprint.pprint(metrics)
else:
print('{record-send-rate} records/sec ({byte-rate} B/sec),'
' {request-latency-avg} latency,'
' {record-size-avg} record size,'
' {batch-size-avg} batch size,'
' {records-per-request-avg} records/req'
.format(**metrics['producer-metrics']))
def print_final(self):
self.print_stats()
    def run(self):
        # Report stats every `interval` seconds until the stop event is set
        # (Event.wait returns True); the while-else clause then runs exactly once
        # to print a final snapshot.
        while self.event and not self.event.wait(self.interval):
            self.print_stats()
        else:
            self.print_final()
def get_args_parser():
parser = argparse.ArgumentParser(
description='This tool is used to verify the producer performance.')
parser.add_argument(
'--topic', type=str,
help='Topic name for test',
default='kafka-python-benchmark-test')
parser.add_argument(
'--num-records', type=int,
help='number of messages to produce',
default=1000000)
parser.add_argument(
'--record-size', type=int,
help='message size in bytes',
default=100)
parser.add_argument(
'--producer-config', type=str, nargs='+', default=(),
        help='kafka producer related configuration properties like '
             'bootstrap_servers, client_id, etc.')
parser.add_argument(
'--brokers', type=int,
help='Number of kafka brokers to start',
default=0)
parser.add_argument(
'--stats-interval', type=int,
help='Interval in seconds for stats reporting to console',
default=5)
parser.add_argument(
'--raw-metrics', action='store_true',
help='Enable this flag to print full metrics dict on each interval')
return parser
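# Example invocation (illustrative values; assumes a reachable broker, or pass
# --brokers N to spin up the local test fixtures implemented above):
#   python producer_performance.py \
#       --producer-config bootstrap_servers=localhost:9092 \
#       --num-records 100000 --record-size 200 --stats-interval 2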
if __name__ == '__main__':
args = get_args_parser().parse_args()
ProducerPerformance.run(args)
|
9a56c6930260c8307bbdf0018349440c429a2d1d
|
1518698c3f7c70912f4079261a3b7b81608bed63
|
/python/pyxel/editor/__init__.py
|
570bbf43fe76e568a6e50d5fc53baf23a241abe2
|
[
"MIT"
] |
permissive
|
kitao/pyxel
|
2cff908a302ed316b31f55511977769e4c356fa5
|
5bffc3516e6f961b45098512ea90bf08c804d71e
|
refs/heads/main
| 2023-07-13T19:46:27.473953
| 2023-07-12T14:23:47
| 2023-07-12T14:23:47
| 136,780,445
| 13,103
| 1,060
|
MIT
| 2023-08-27T13:58:10
| 2018-06-10T04:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 79
|
py
|
__init__.py
|
from . import additional_apis # noqa: F401
from .app import App # noqa: F401
|
e03fea013e1e096a33c86d24b6bd15121c401483
|
9b9487dcc5a1c7efe527070aea4d35ef2bcc3bff
|
/sync_settings/sync_version.py
|
4440aab86a0bc60faab3c2ed12be191f09e49d84
|
[
"MIT"
] |
permissive
|
mfuentesg/SyncSettings
|
c9a193fbc19262b25998b03bd9beb12eb0d0cc97
|
41fa229a922563d49034b61a08d07d642ebc0527
|
refs/heads/master
| 2023-07-02T18:17:48.401929
| 2022-07-28T13:48:23
| 2022-07-28T13:48:23
| 39,971,725
| 333
| 53
|
MIT
| 2022-12-09T05:18:46
| 2015-07-30T20:46:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
sync_version.py
|
# -*- coding: utf-8 -*-
import sublime
import json
import os
from .libs.gist import Gist
from .libs import settings, path, file
file_path = path.join(os.path.expanduser('~'), '.sync_settings', 'sync.json')
def get_local_version():
if not path.exists(file_path):
return {}
try:
with open(file_path) as f:
return file.encode_json(f.read())
except: # noqa: E722
pass
return {}
def get_remote_version():
try:
commit = Gist(
http_proxy=settings.get('http_proxy'),
https_proxy=settings.get('https_proxy')
).commits(settings.get('gist_id'))[0]
return {
'hash': commit['version'],
'created_at': commit['committed_at'],
}
except: # noqa: E722
pass
return {}
def update_config_file(info):
with open(file_path, 'w') as f:
json.dump(info, f)
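# The file written above is a small JSON document mirroring what upgrade() passes
# in from get_remote_version(), i.e. roughly (values illustrative):
#   {"hash": "<gist commit version>", "created_at": "<committed_at timestamp>"}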
def show_update_dialog(on_yes=None):
msg = (
'Sync Settings:\n\n'
'Your settings seem out of date.\n\n'
'Do you want to download the latest version?'
)
if sublime.yes_no_cancel_dialog(msg) == sublime.DIALOG_YES:
# call download command
if on_yes:
on_yes()
def upgrade():
local = get_local_version()
if not local.get('hash', ''):
show_update_dialog()
return
remote = get_remote_version()
if local['hash'] == remote.get('hash', ''):
return
# TODO: check if get remote version failed
if local['created_at'] < remote.get('created_at', ''):
show_update_dialog(
on_yes=lambda: update_config_file(remote)
)
|
173a9c120a76965bb00a193d39047e0552378148
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorSingleExpression.py
|
f6061634c60d10a477fc4c5ce11b1339ea159345
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 15
|
py
|
percentOperatorSingleExpression.py
|
u'%04.5r' % 42
|
32bc3681c5c721ccf38a2479f64b2d075c774f92
|
c2d48caa5db7e746a38beca625406fcf47379d3c
|
/src/olympia/addons/admin.py
|
ad6e236a5d8e0b32742bba216da9cc65919c2f75
|
[] |
permissive
|
mozilla/addons-server
|
1f6269ec0a4aa5a0142a5f81978ef674daf213a7
|
e0f043bca8a64478e2ba62f877c9dc28620be22f
|
refs/heads/master
| 2023-09-01T09:34:41.867534
| 2023-09-01T07:21:22
| 2023-09-01T07:21:22
| 16,416,867
| 920
| 590
|
BSD-3-Clause
| 2023-09-14T16:15:01
| 2014-01-31T18:44:15
|
Python
|
UTF-8
|
Python
| false
| false
| 21,893
|
py
|
admin.py
|
import functools
from urllib.parse import urlencode, urljoin
from django import forms, http
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.utils import display_for_field
from django.core import validators
from django.db.models import Exists, OuterRef
from django.forms.models import modelformset_factory
from django.http.response import (
HttpResponseForbidden,
HttpResponseNotAllowed,
HttpResponseRedirect,
)
from django.shortcuts import get_object_or_404
from django.urls import re_path, resolve, reverse
from django.utils.encoding import force_str
from django.utils.html import format_html, format_html_join
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon, AddonReviewerFlags, AddonUser
from olympia.amo.admin import AMOModelAdmin, DateRangeFilter
from olympia.amo.forms import AMOModelForm
from olympia.amo.utils import send_mail
from olympia.files.models import File
from olympia.git.models import GitExtractionEntry
from olympia.ratings.models import Rating
from olympia.reviewers.models import NeedsHumanReview
from olympia.versions.models import Version
from olympia.zadmin.admin import related_content_link, related_single_content_link
from . import models
from .forms import AdminBaseFileFormSet, FileStatusForm
log = olympia.core.logger.getLogger('z.addons.admin')
class AddonReviewerFlagsInline(admin.TabularInline):
model = AddonReviewerFlags
verbose_name_plural = 'Reviewer Flags'
can_delete = False
view_on_site = False
class AddonUserInline(admin.TabularInline):
model = AddonUser
raw_id_fields = ('user',)
readonly_fields = ('user_profile_link',)
extra = 0
def user_profile_link(self, obj):
if obj.pk:
return format_html(
'<a href="{}">Admin User Profile</a> ({})',
reverse('admin:users_userprofile_change', args=(obj.user.pk,)),
obj.user.email,
)
else:
return ''
user_profile_link.short_description = 'User Profile'
class FileInlineChecks(admin.checks.InlineModelAdminChecks):
def _check_relation(self, obj, parent_model):
"""File doesn't have a direct FK to Addon (it's via Version) so we have
to bypass this check.
"""
return []
class FileInline(admin.TabularInline):
model = File
extra = 0
max_num = 0
fields = (
'created',
'version__id',
'version__version',
'version__channel',
'version__deleted',
'status',
'version__is_blocked',
'version__needs_human_review',
)
editable_fields = ('status',)
readonly_fields = tuple(set(fields) - set(editable_fields))
can_delete = False
view_on_site = False
template = 'admin/addons/file_inline.html'
checks_class = FileInlineChecks
show_change_link = True
def version__id(self, obj):
return obj.version_id
version__id.short_description = 'Version ID'
def version__version(self, obj):
return related_single_content_link(obj, 'version')
version__version.short_description = 'Version'
def version__channel(self, obj):
return obj.version.get_channel_display()
version__channel.short_description = 'Channel'
def version__deleted(self, obj):
return obj.version.deleted
version__deleted.short_description = 'Deleted'
version__deleted.boolean = True
def version__is_blocked(self, obj):
blockversion = getattr(obj.version, 'blockversion', None)
if not blockversion:
return ''
url = blockversion.block.get_admin_url_path()
template = '<a href="{}">Blocked</a>'
return format_html(template, url)
version__is_blocked.short_description = 'Block status'
def version__needs_human_review(self, obj):
# Set by the prefetch_related() call below.
return obj.needs_human_review
version__needs_human_review.short_description = 'Needs human review'
version__needs_human_review.boolean = True
def get_formset(self, request, obj=None, **kwargs):
self.instance = obj
Formset = modelformset_factory(
File,
form=FileStatusForm,
formset=AdminBaseFileFormSet,
extra=self.get_extra(request, obj, **kwargs),
min_num=self.get_min_num(request, obj, **kwargs),
max_num=self.get_max_num(request, obj, **kwargs),
)
return Formset
def has_add_permission(self, request, obj=None):
return False
def get_queryset(self, request):
self.pager = amo.utils.paginate(
request,
Version.unfiltered.filter(addon=self.instance).values_list('pk', flat=True),
30,
)
# A list coercion so this doesn't result in a subquery with a LIMIT
# which MySQL doesn't support (at this time).
versions = list(self.pager.object_list)
qs = (
super()
.get_queryset(request)
.filter(version__in=versions)
.order_by('-version__id')
)
sub_qs = NeedsHumanReview.objects.filter(
is_active=True, version=OuterRef('version')
)
return qs.select_related('version', 'version__blockversion').annotate(
needs_human_review=Exists(sub_qs)
)
class AddonAdmin(AMOModelAdmin):
class Media(AMOModelAdmin.Media):
css = {
'all': (
'css/admin/amoadmin.css',
'css/admin/l10n.css',
'css/admin/pagination.css',
'css/admin/addons.css',
)
}
js = AMOModelAdmin.Media.js + (
'admin/js/jquery.init.js',
'js/admin/l10n.js',
)
list_display = (
'__str__',
'type',
'guid',
'status',
'average_daily_users',
'average_rating',
'authors_links',
'reviewer_links',
'reviewer_flags',
)
list_filter = (
(
'created',
DateRangeFilter,
),
'type',
'status',
(
'addonuser__user__created',
DateRangeFilter,
),
(
'addonuser__user__banned',
admin.EmptyFieldListFilter,
),
(
'reviewerflags__auto_approval_disabled',
admin.BooleanFieldListFilter,
),
(
'reviewerflags__auto_approval_disabled_unlisted',
admin.BooleanFieldListFilter,
),
(
'reviewerflags__auto_approval_disabled_until_next_approval',
admin.BooleanFieldListFilter,
),
(
'reviewerflags__auto_approval_disabled_until_next_approval_unlisted',
admin.BooleanFieldListFilter,
),
(
'reviewerflags__auto_approval_delayed_until',
DateRangeFilter,
),
(
'reviewerflags__auto_approval_delayed_until_unlisted',
DateRangeFilter,
),
)
list_select_related = ('reviewerflags',)
search_fields = ('id', 'guid__startswith', 'slug__startswith')
search_by_ip_actions = (amo.LOG.ADD_VERSION.id,)
search_by_ip_activity_accessor = 'addonlog__activity_log'
search_by_ip_activity_reverse_accessor = 'activity_log__addonlog__addon'
inlines = (
AddonReviewerFlagsInline,
AddonUserInline,
FileInline,
)
readonly_fields = (
'id',
'created',
'activity',
'average_rating',
'bayesian_rating',
'guid',
'total_ratings_link',
'text_ratings_count',
'weekly_downloads',
'average_daily_users',
'hotness',
)
fieldsets = (
(
None,
{
'fields': (
'id',
'created',
'name',
'slug',
'guid',
'default_locale',
'type',
'status',
'activity',
),
},
),
(
'Details',
{
'fields': (
'summary',
'description',
'homepage',
'eula',
'privacy_policy',
'developer_comments',
'icon_type',
),
},
),
(
'Support',
{
'fields': ('support_url', 'support_email'),
},
),
(
'Stats',
{
'fields': (
'total_ratings_link',
'average_rating',
'bayesian_rating',
'text_ratings_count',
'weekly_downloads',
'average_daily_users',
'hotness',
),
},
),
(
'Flags',
{
'fields': (
'disabled_by_user',
'requires_payment',
'is_experimental',
'reputation',
),
},
),
(
'Dictionaries and Language Packs',
{
'fields': ('target_locale',),
},
),
)
actions = ['git_extract_action']
def get_queryset_annotations(self):
# Add annotation for _unlisted_versions_exists/_listed_versions_exists
# to avoid repeating those queries for each add-on in the list.
sub_qs = Version.unfiltered.filter(addon=OuterRef('pk')).values_list('id')
annotations = {
'_unlisted_versions_exists': Exists(
sub_qs.filter(channel=amo.CHANNEL_UNLISTED)
),
'_listed_versions_exists': Exists(
sub_qs.filter(channel=amo.CHANNEL_LISTED)
),
}
return annotations
def get_queryset(self, request):
return (
Addon.unfiltered.all()
.only_translations()
.transform(Addon.attach_all_authors)
)
def get_urls(self):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return functools.update_wrapper(wrapper, view)
urlpatterns = super().get_urls()
custom_urlpatterns = [
re_path(
r'^(?P<object_id>.+)/git_extract/$',
wrap(self.git_extract_view),
name='addons_git_extract',
),
]
return custom_urlpatterns + urlpatterns
def get_rangefilter_addonuser__user__created_title(self, request, field_path):
return 'author created'
def authors_links(self, obj):
# Note: requires .transform(Addon.attach_all_authors) to have been
# applied to fill all_authors property and role on each user in it.
authors = obj.all_authors
return (
format_html(
'<ul>{}</ul>',
format_html_join(
'',
'<li><a href="{}">{} ({}{})</a></li>',
(
(
urljoin(
settings.EXTERNAL_SITE_URL,
reverse(
'admin:users_userprofile_change', args=(author.pk,)
),
),
author.email,
dict(amo.AUTHOR_CHOICES_UNFILTERED)[author.role],
', Not listed' if author.listed is False else '',
)
for author in authors
),
),
)
if authors
else '-'
)
authors_links.short_description = 'Authors'
def total_ratings_link(self, obj):
return related_content_link(
obj,
Rating,
'addon',
related_manager='without_replies',
text=obj.total_ratings,
)
total_ratings_link.short_description = 'Ratings'
def reviewer_links(self, obj):
links = []
        # _listed_versions_exists and _unlisted_versions_exists are
        # provided by the annotations defined in get_queryset_annotations()
if obj._listed_versions_exists:
links.append(
(
urljoin(
settings.EXTERNAL_SITE_URL,
reverse('reviewers.review', args=['listed', obj.id]),
),
'Review (listed)',
)
)
if obj._unlisted_versions_exists:
links.append(
(
urljoin(
settings.EXTERNAL_SITE_URL,
reverse('reviewers.review', args=['unlisted', obj.id]),
),
'Review (unlisted)',
)
)
return format_html(
'<ul>{}</ul>', format_html_join('', '<li><a href="{}">{}</a></li>', links)
)
reviewer_links.short_description = 'Reviewer links'
def change_view(self, request, object_id, form_url='', extra_context=None):
lookup_field = Addon.get_lookup_field(object_id)
if lookup_field != 'pk':
addon = None
try:
if lookup_field in ('slug', 'guid'):
addon = self.get_queryset(request).get(**{lookup_field: object_id})
except Addon.DoesNotExist:
raise http.Http404
# Don't get in an infinite loop if addon.slug.isdigit().
if addon and addon.id and addon.id != object_id:
url = request.path.replace(object_id, str(addon.id), 1)
if request.GET:
url += '?' + request.GET.urlencode()
return http.HttpResponsePermanentRedirect(url)
return super().change_view(
request, object_id, form_url, extra_context=extra_context
)
def render_change_form(
self, request, context, add=False, change=False, form_url='', obj=None
):
context.update(
{
'external_site_url': settings.EXTERNAL_SITE_URL,
'has_listed_versions': obj.has_listed_versions(include_deleted=True)
if obj
else False,
'has_unlisted_versions': obj.has_unlisted_versions(include_deleted=True)
if obj
else False,
}
)
return super().render_change_form(
request=request,
context=context,
add=add,
change=change,
form_url=form_url,
obj=obj,
)
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
if 'status' in form.changed_data:
ActivityLog.create(amo.LOG.CHANGE_STATUS, obj, form.cleaned_data['status'])
log.info(
'Addon "%s" status changed to: %s'
% (obj.slug, form.cleaned_data['status'])
)
def git_extract_action(self, request, qs):
addon_ids = []
for addon in qs:
GitExtractionEntry.objects.create(addon=addon)
addon_ids.append(force_str(addon))
kw = {'addons': ', '.join(addon_ids)}
self.message_user(request, 'Git extraction triggered for "%(addons)s".' % kw)
git_extract_action.short_description = 'Git-Extract'
def git_extract_view(self, request, object_id, extra_context=None):
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
if not acl.action_allowed_for(request.user, amo.permissions.ADDONS_EDIT):
return HttpResponseForbidden()
obj = get_object_or_404(Addon, id=object_id)
self.git_extract_action(request, (obj,))
return HttpResponseRedirect(
reverse('admin:addons_addon_change', args=(obj.pk,))
)
@admin.display(description='Activity Logs')
def activity(self, obj):
return related_content_link(obj, ActivityLog, 'addonlog__addon')
@admin.display(description='Flags')
def reviewer_flags(self, obj):
fields = (
field
for field in AddonReviewerFlags._meta.get_fields()
if field.name not in ('created', 'modified', 'addon')
)
try:
            # Built as a list (not a generator) so the emptiness check below can
            # actually skip rendering when no reviewer flag is set.
            contents = [
                (
                    field.verbose_name,
                    display_for_field(
                        getattr(obj.reviewerflags, field.name), field, False
                    ),
                )
                for field in fields
                if getattr(obj.reviewerflags, field.name)
            ]
if contents:
return format_html(
'<table>{}</table>',
format_html_join(
'',
'<tr class="alt"><th>{}</th><td>{}</td></tr>',
contents,
),
)
except AddonReviewerFlags.DoesNotExist:
pass
class FrozenAddonAdmin(AMOModelAdmin):
raw_id_fields = ('addon',)
class ReplacementAddonForm(AMOModelForm):
def clean_path(self):
path = None
try:
path = self.data.get('path')
site = settings.SITE_URL
if models.ReplacementAddon.path_is_external(path):
if path.startswith(site):
raise forms.ValidationError(
'Paths for [%s] should be relative, not full URLs '
'including the domain name' % site
)
validators.URLValidator()(path)
else:
path = ('/' if not path.startswith('/') else '') + path
resolve(path)
except forms.ValidationError as validation_error:
# Re-raise the ValidationError about full paths for SITE_URL.
raise validation_error
except Exception:
raise forms.ValidationError('Path [%s] is not valid' % path)
return path
class ReplacementAddonAdmin(AMOModelAdmin):
list_display = ('guid', 'path', 'guid_slug', '_url')
form = ReplacementAddonForm
def _url(self, obj):
guid_param = urlencode({'guid': obj.guid})
return format_html(
'<a href="{}">Test</a>',
reverse('addons.find_replacement') + '?%s' % guid_param,
)
def guid_slug(self, obj):
try:
slug = models.Addon.objects.get(guid=obj.guid).slug
except models.Addon.DoesNotExist:
slug = '- Add-on not on AMO -'
return slug
def has_module_permission(self, request):
# If one can see the changelist, then they have access to the module.
return self.has_change_permission(request)
def has_change_permission(self, request, obj=None):
# If an obj is passed, then we're looking at the individual change page
# for a replacement addon, otherwise we're looking at the list. When
# looking at the list, we also allow users with Addons:Edit - they
# won't be able to make any changes but they can see the list.
if obj is not None:
return super().has_change_permission(request, obj=obj)
else:
return acl.action_allowed_for(
request.user, amo.permissions.ADDONS_EDIT
) or super().has_change_permission(request, obj=obj)
@admin.register(models.AddonRegionalRestrictions)
class AddonRegionalRestrictionsAdmin(AMOModelAdmin):
list_display = ('created', 'modified', 'addon__name', 'excluded_regions')
fields = ('created', 'modified', 'addon', 'excluded_regions')
raw_id_fields = ('addon',)
readonly_fields = ('created', 'modified')
view_on_site = False
def get_readonly_fields(self, request, obj=None):
return self.readonly_fields + (('addon',) if obj else ())
def addon__name(self, obj):
return str(obj.addon)
addon__name.short_description = 'Addon'
def _send_mail(self, obj, action):
message = (
f'Regional restriction for addon "{obj.addon.name}" '
f'[{obj.addon.id}] {action}: {obj.excluded_regions}'
)
send_mail(
f'Regional Restriction {action} for Add-on',
message,
recipient_list=('amo-admins@mozilla.com',),
)
def delete_model(self, request, obj):
self._send_mail(obj, 'deleted')
super().delete_model(request, obj)
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
self._send_mail(obj, 'changed' if change else 'added')
@admin.register(models.AddonBrowserMapping)
class AddonBrowserMappingAdmin(AMOModelAdmin):
list_display = ('addon__name', 'browser', 'extension_id', 'created', 'modified')
fields = ('addon', 'browser', 'extension_id')
raw_id_fields = ('addon',)
readonly_fields = ('created', 'modified')
def addon__name(self, obj):
return str(obj.addon)
addon__name.short_description = 'Addon'
admin.site.register(models.DeniedGuid)
admin.site.register(models.Addon, AddonAdmin)
admin.site.register(models.FrozenAddon, FrozenAddonAdmin)
admin.site.register(models.ReplacementAddon, ReplacementAddonAdmin)
|
db6bce866d553523dbe261a9bbaa725b7cece3cf
|
e4b11f60c768fb1719e4158e9e701d424184c5ce
|
/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py
|
6ca3c3be178a75f491df1a50f84336a72c8fe0c9
|
[
"Apache-2.0"
] |
permissive
|
openstack/ceilometer
|
af938664ccba710547dbb4c74e5deb2175482d56
|
d31d4ed3574a5d19fe4b09ab2c227dba64da170a
|
refs/heads/master
| 2023-08-28T15:09:01.659514
| 2023-08-21T03:29:16
| 2023-08-21T03:29:16
| 6,642,735
| 246
| 289
|
Apache-2.0
| 2019-11-01T04:21:47
| 2012-11-11T18:33:12
|
Python
|
UTF-8
|
Python
| false
| false
| 24,528
|
py
|
test_inspector.py
|
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for libvirt inspector."""
from unittest import mock
import fixtures
from oslo_utils import units
from oslotest import base
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector
from ceilometer.compute.virt.libvirt import utils
from ceilometer import service
class FakeLibvirtError(Exception):
pass
class VMInstance(object):
id = 'ff58e738-12f4-4c58-acde-77617b68da56'
name = 'instance-00000001'
class TestLibvirtInspection(base.BaseTestCase):
def setUp(self):
super(TestLibvirtInspection, self).setUp()
conf = service.prepare_service([], [])
self.instance = VMInstance()
libvirt_inspector.libvirt = mock.Mock()
libvirt_inspector.libvirt.getVersion.return_value = 5001001
libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5
libvirt_inspector.libvirt.libvirtError = FakeLibvirtError
utils.libvirt = libvirt_inspector.libvirt
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=None):
self.inspector = libvirt_inspector.LibvirtInspector(conf)
def test_inspect_instance_stats(self):
domain = mock.Mock()
domain.info.return_value = (0, 0, 0, 2, 999999)
domain.memoryStats.return_value = {'available': 51200,
'unused': 25600,
'rss': 30000,
'swap_in': 5120,
'swap_out': 8192}
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
conn.domainListGetStats.return_value = [({}, {
'cpu.time': 999999,
'vcpu.maximum': 4,
'vcpu.current': 2,
'vcpu.0.time': 10000,
'vcpu.0.wait': 10000,
'vcpu.2.time': 10000,
'vcpu.2.wait': 10000,
'perf.cmt': 90112,
'perf.cpu_cycles': 7259361,
'perf.instructions': 8815623,
'perf.cache_references': 74184,
'perf.cache_misses': 16737,
'perf.mbmt': 1892352,
'perf.mbml': 1802240})]
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
stats = self.inspector.inspect_instance(self.instance, None)
self.assertEqual(2, stats.cpu_number)
self.assertEqual(40000, stats.cpu_time)
self.assertEqual(90112, stats.cpu_l3_cache_usage)
self.assertEqual(25600 / units.Ki, stats.memory_usage)
self.assertEqual(30000 / units.Ki, stats.memory_resident)
self.assertEqual(5120 / units.Ki, stats.memory_swap_in)
self.assertEqual(8192 / units.Ki, stats.memory_swap_out)
self.assertEqual(1892352, stats.memory_bandwidth_total)
self.assertEqual(1802240, stats.memory_bandwidth_local)
self.assertEqual(7259361, stats.cpu_cycles)
self.assertEqual(8815623, stats.instructions)
self.assertEqual(74184, stats.cache_references)
self.assertEqual(16737, stats.cache_misses)
def test_inspect_instance_stats_fallback_cpu_time(self):
domain = mock.Mock()
domain.info.return_value = (0, 0, 0, 2, 20000)
domain.memoryStats.return_value = {'available': 51200,
'unused': 25600,
'rss': 30000}
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
conn.domainListGetStats.return_value = [({}, {
'vcpu.current': 2,
'vcpu.maximum': 4,
'vcpu.0.time': 10000,
'vcpu.1.time': 10000,
'cpu.time': 999999})]
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
            stats = self.inspector.inspect_instance(self.instance, None)
self.assertEqual(2, stats.cpu_number)
self.assertEqual(999999, stats.cpu_time)
def test_inspect_cpus_with_domain_shutoff(self):
domain = mock.Mock()
domain.info.return_value = (5, 0, 0, 2, 999999)
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
self.assertRaises(virt_inspector.InstanceShutOffException,
self.inspector.inspect_instance,
self.instance, None)
def test_inspect_vnics(self):
dom_xml = """
<domain type='kvm'>
<devices>
<!-- NOTE(dprince): interface with no target -->
<interface type='bridge'>
<mac address='fa:16:3e:93:31:5a'/>
<source bridge='br100'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' \
slot='0x03' function='0x0'/>
</interface>
<!-- NOTE(dprince): interface with no mac -->
<interface type='bridge'>
<source bridge='br100'/>
<target dev='foo'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' \
slot='0x03' function='0x0'/>
</interface>
<interface type='bridge'>
<mac address='fa:16:3e:71:ec:6d'/>
<source bridge='br100'/>
<target dev='vnet0'/>
<filterref filter=
'nova-instance-00000001-fa163e71ec6d'>
<parameter name='DHCPSERVER' value='10.0.0.1'/>
<parameter name='IP' value='10.0.0.2'/>
<parameter name='PROJMASK' value='255.255.255.0'/>
<parameter name='PROJNET' value='10.0.0.0'/>
</filterref>
<alias name='net0'/>
</interface>
<interface type='bridge'>
<mac address='fa:16:3e:71:ec:6e'/>
<source bridge='br100'/>
<target dev='vnet1'/>
<filterref filter=
'nova-instance-00000001-fa163e71ec6e'>
<parameter name='DHCPSERVER' value='192.168.0.1'/>
<parameter name='IP' value='192.168.0.2'/>
<parameter name='PROJMASK' value='255.255.255.0'/>
<parameter name='PROJNET' value='192.168.0.0'/>
</filterref>
<alias name='net1'/>
</interface>
<interface type='bridge'>
<mac address='fa:16:3e:96:33:f0'/>
<source bridge='qbr420008b3-7c'/>
<target dev='vnet2'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' \
slot='0x03' function='0x0'/>
</interface>
</devices>
</domain>
"""
interface_stats = {
'vnet0': (1, 2, 21, 22, 3, 4, 23, 24),
'vnet1': (5, 6, 25, 26, 7, 8, 27, 28),
'vnet2': (9, 10, 29, 30, 11, 12, 31, 32),
}
interfaceStats = interface_stats.__getitem__
domain = mock.Mock()
domain.XMLDesc.return_value = dom_xml
domain.info.return_value = (0, 0, 0, 2, 999999)
domain.interfaceStats.side_effect = interfaceStats
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
interfaces = list(self.inspector.inspect_vnics(
self.instance, None))
self.assertEqual(3, len(interfaces))
vnic0 = interfaces[0]
self.assertEqual('vnet0', vnic0.name)
self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac)
self.assertEqual('nova-instance-00000001-fa163e71ec6d', vnic0.fref)
self.assertEqual('255.255.255.0', vnic0.parameters.get('projmask'))
self.assertEqual('10.0.0.2', vnic0.parameters.get('ip'))
self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet'))
self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver'))
self.assertEqual(1, vnic0.rx_bytes)
self.assertEqual(2, vnic0.rx_packets)
self.assertEqual(3, vnic0.tx_bytes)
self.assertEqual(4, vnic0.tx_packets)
self.assertEqual(21, vnic0.rx_errors)
self.assertEqual(22, vnic0.rx_drop)
self.assertEqual(23, vnic0.tx_errors)
self.assertEqual(24, vnic0.tx_drop)
vnic1 = interfaces[1]
self.assertEqual('vnet1', vnic1.name)
self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac)
self.assertEqual('nova-instance-00000001-fa163e71ec6e', vnic1.fref)
self.assertEqual('255.255.255.0', vnic1.parameters.get('projmask'))
self.assertEqual('192.168.0.2', vnic1.parameters.get('ip'))
self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet'))
self.assertEqual('192.168.0.1', vnic1.parameters.get('dhcpserver'))
self.assertEqual(5, vnic1.rx_bytes)
self.assertEqual(6, vnic1.rx_packets)
self.assertEqual(7, vnic1.tx_bytes)
self.assertEqual(8, vnic1.tx_packets)
self.assertEqual(25, vnic1.rx_errors)
self.assertEqual(26, vnic1.rx_drop)
self.assertEqual(27, vnic1.tx_errors)
self.assertEqual(28, vnic1.tx_drop)
vnic2 = interfaces[2]
self.assertEqual('vnet2', vnic2.name)
self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac)
self.assertIsNone(vnic2.fref)
self.assertEqual(
{'interfaceid': None, 'bridge': 'qbr420008b3-7c'},
vnic2.parameters)
self.assertEqual(9, vnic2.rx_bytes)
self.assertEqual(10, vnic2.rx_packets)
self.assertEqual(11, vnic2.tx_bytes)
self.assertEqual(12, vnic2.tx_packets)
self.assertEqual(29, vnic2.rx_errors)
self.assertEqual(30, vnic2.rx_drop)
self.assertEqual(31, vnic2.tx_errors)
self.assertEqual(32, vnic2.tx_drop)
def test_inspect_vnics_with_domain_shutoff(self):
domain = mock.Mock()
domain.info.return_value = (5, 0, 0, 2, 999999)
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
inspect = self.inspector.inspect_vnics
self.assertRaises(virt_inspector.InstanceShutOffException,
list, inspect(self.instance, None))
def test_inspect_disks(self):
dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='/path/instance-00000001/disk'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x04' function='0x0'/>
</disk>
</devices>
</domain>
"""
blockStatsFlags = {'wr_total_times': 91752302267,
'rd_operations': 6756,
'flush_total_times': 1310427331,
'rd_total_times': 29142253616,
'rd_bytes': 171460096,
'flush_operations': 746,
'wr_operations': 1437,
'wr_bytes': 13574656}
domain = mock.Mock()
domain.XMLDesc.return_value = dom_xml
domain.info.return_value = (0, 0, 0, 2, 999999)
domain.blockStats.return_value = (1, 2, 3, 4, -1)
domain.blockStatsFlags.return_value = blockStatsFlags
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
disks = list(self.inspector.inspect_disks(self.instance, None))
self.assertEqual(1, len(disks))
self.assertEqual('vda', disks[0].device)
self.assertEqual(1, disks[0].read_requests)
self.assertEqual(2, disks[0].read_bytes)
self.assertEqual(3, disks[0].write_requests)
self.assertEqual(4, disks[0].write_bytes)
self.assertEqual(91752302267, disks[0].wr_total_times)
self.assertEqual(29142253616, disks[0].rd_total_times)
def test_inspect_disks_with_domain_shutoff(self):
domain = mock.Mock()
domain.info.return_value = (5, 0, 0, 2, 999999)
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
inspect = self.inspector.inspect_disks
self.assertRaises(virt_inspector.InstanceShutOffException,
list, inspect(self.instance, None))
def test_inspect_disk_info(self):
dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='/path/instance-00000001/disk'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x04' function='0x0'/>
</disk>
</devices>
</domain>
"""
domain = mock.Mock()
domain.XMLDesc.return_value = dom_xml
domain.blockInfo.return_value = (1, 2, 3, -1)
domain.info.return_value = (0, 0, 0, 2, 999999)
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
disks = list(self.inspector.inspect_disk_info(
self.instance, None))
self.assertEqual(1, len(disks))
self.assertEqual('vda', disks[0].device)
self.assertEqual(3, disks[0].capacity)
self.assertEqual(2, disks[0].allocation)
self.assertEqual(3, disks[0].physical)
def test_inspect_disk_info_network_type(self):
dom_xml = """
<domain type='kvm'>
<devices>
<disk type='network' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='/path/instance-00000001/disk'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x04' function='0x0'/>
</disk>
</devices>
</domain>
"""
domain = mock.Mock()
domain.XMLDesc.return_value = dom_xml
domain.blockInfo.return_value = (1, 2, 3, -1)
domain.info.return_value = (0, 0, 0, 2, 999999)
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
disks = list(self.inspector.inspect_disk_info(self.instance, None))
self.assertEqual(1, len(disks))
def test_inspect_disk_info_without_source_element(self):
dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw' cache='none'/>
<backingStore/>
<target dev='hdd' bus='ide' tray='open'/>
<readonly/>
<alias name='ide0-1-1'/>
<address type='drive' controller='0' bus='1'
target='0' unit='1'/>
</disk>
</devices>
</domain>
"""
domain = mock.Mock()
domain.XMLDesc.return_value = dom_xml
domain.blockInfo.return_value = (1, 2, 3, -1)
domain.info.return_value = (0, 0, 0, 2, 999999)
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
disks = list(self.inspector.inspect_disk_info(self.instance, None))
self.assertEqual(0, len(disks))
def test_inspect_disks_without_source_element(self):
dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw' cache='none'/>
<backingStore/>
<target dev='hdd' bus='ide' tray='open'/>
<readonly/>
<alias name='ide0-1-1'/>
<address type='drive' controller='0' bus='1'
target='0' unit='1'/>
</disk>
</devices>
</domain>
"""
blockStatsFlags = {'wr_total_times': 91752302267,
'rd_operations': 6756,
'flush_total_times': 1310427331,
'rd_total_times': 29142253616,
'rd_bytes': 171460096,
'flush_operations': 746,
'wr_operations': 1437,
'wr_bytes': 13574656}
domain = mock.Mock()
domain.XMLDesc.return_value = dom_xml
domain.info.return_value = (0, 0, 0, 2, 999999)
domain.blockStats.return_value = (1, 2, 3, 4, -1)
domain.blockStatsFlags.return_value = blockStatsFlags
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
disks = list(self.inspector.inspect_disks(self.instance, None))
self.assertEqual(0, len(disks))
def test_inspect_memory_usage_with_domain_shutoff(self):
domain = mock.Mock()
domain.info.return_value = (5, 0, 51200, 2, 999999)
conn = mock.Mock()
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
self.assertRaises(virt_inspector.InstanceShutOffException,
self.inspector.inspect_instance,
self.instance, None)
def test_inspect_memory_with_empty_stats(self):
domain = mock.Mock()
domain.info.return_value = (0, 0, 51200, 2, 999999)
domain.memoryStats.return_value = {}
conn = mock.Mock()
conn.domainListGetStats.return_value = [({}, {})]
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
stats = self.inspector.inspect_instance(self.instance, None)
self.assertIsNone(stats.memory_usage)
self.assertIsNone(stats.memory_resident)
self.assertIsNone(stats.memory_swap_in)
self.assertIsNone(stats.memory_swap_out)
def test_inspect_memory_with_usable(self):
domain = mock.Mock()
domain.info.return_value = (0, 0, 0, 2, 999999)
domain.memoryStats.return_value = {'available': 76800,
'rss': 30000,
'swap_in': 5120,
'swap_out': 8192,
'unused': 25600,
'usable': 51200}
conn = mock.Mock()
conn.domainListGetStats.return_value = [({}, {})]
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
stats = self.inspector.inspect_instance(self.instance, None)
self.assertEqual(25600 / units.Ki, stats.memory_usage)
self.assertEqual(30000 / units.Ki, stats.memory_resident)
self.assertEqual(5120 / units.Ki, stats.memory_swap_in)
self.assertEqual(8192 / units.Ki, stats.memory_swap_out)
def test_inspect_perf_events_libvirt_less_than_2_3_0(self):
domain = mock.Mock()
domain.info.return_value = (0, 0, 51200, 2, 999999)
domain.memoryStats.return_value = {'rss': 0,
'available': 51200,
'unused': 25600}
conn = mock.Mock()
conn.domainListGetStats.return_value = [({}, {})]
conn.lookupByUUIDString.return_value = domain
with mock.patch('ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection', return_value=conn):
stats = self.inspector.inspect_instance(self.instance, None)
self.assertIsNone(stats.cpu_l3_cache_usage)
self.assertIsNone(stats.memory_bandwidth_total)
self.assertIsNone(stats.memory_bandwidth_local)
self.assertIsNone(stats.cpu_cycles)
self.assertIsNone(stats.instructions)
self.assertIsNone(stats.cache_references)
self.assertIsNone(stats.cache_misses)
class TestLibvirtInspectionWithError(base.BaseTestCase):
def setUp(self):
super(TestLibvirtInspectionWithError, self).setUp()
conf = service.prepare_service([], [])
self.useFixture(fixtures.MonkeyPatch(
'ceilometer.compute.virt.libvirt.utils.'
'refresh_libvirt_connection',
mock.MagicMock(side_effect=[None, Exception('dummy')])))
libvirt_inspector.libvirt = mock.Mock()
libvirt_inspector.libvirt.libvirtError = FakeLibvirtError
utils.libvirt = libvirt_inspector.libvirt
self.inspector = libvirt_inspector.LibvirtInspector(conf)
def test_inspect_unknown_error(self):
self.assertRaises(virt_inspector.InspectorException,
self.inspector.inspect_instance, 'foo', None)
|
2e2a8722fde06209fcd475b18ea9b26c26b018d9
|
c5fd80ede07f0972a9b99d0c65a0df40e6d487fa
|
/pyocd/debug/breakpoints/provider.py
|
0d2f9794f6a1234eb27b82b11b9b8605c29b33ee
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pyocd/pyOCD
|
46330f3a10c9be381293d220cc025e0e347513ce
|
9253740baf46ebf4eacbce6bf3369150c5fb8ee0
|
refs/heads/main
| 2023-08-18T07:56:54.205305
| 2023-08-13T19:11:01
| 2023-08-13T19:11:01
| 13,862,423
| 507
| 204
|
Apache-2.0
| 2023-09-09T20:13:57
| 2013-10-25T14:10:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,104
|
py
|
provider.py
|
# pyOCD debugger
# Copyright (c) 2015-2017 Arm Limited
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from ...core.target import Target
class Breakpoint:
def __init__(self, provider):
self.type: Target.BreakpointType = Target.BreakpointType.HW
self.enabled: bool = False
self.addr: int = 0
self.original_instr: int = 0
self.provider: BreakpointProvider = provider
def __repr__(self) -> str:
return "<%s@0x%08x type=%s addr=0x%08x>" % (self.__class__.__name__, id(self), self.type.name, self.addr)
class BreakpointProvider:
"""@brief Abstract base class for breakpoint providers."""
def init(self) -> None:
raise NotImplementedError()
@property
def bp_type(self) -> Target.BreakpointType:
raise NotImplementedError()
@property
def do_filter_memory(self) -> bool:
return False
@property
def available_breakpoints(self) -> int:
raise NotImplementedError()
def can_support_address(self, addr: int) -> bool:
raise NotImplementedError()
def find_breakpoint(self, addr: int) -> Optional[Breakpoint]:
raise NotImplementedError()
def set_breakpoint(self, addr: int) -> Optional[Breakpoint]:
raise NotImplementedError()
def remove_breakpoint(self, bp: Breakpoint) -> None:
raise NotImplementedError()
def filter_memory(self, addr: int, size: int, data: int) -> int:
return data
def flush(self) -> None:
pass
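# --- Hypothetical sketch (not part of pyOCD) --------------------------------
# A minimal in-memory provider illustrating how the abstract interface above is
# meant to be implemented. The class name `_ExampleHardwareProvider` and the
# fixed pool of 4 breakpoints are invented here purely for illustration.
class _ExampleHardwareProvider(BreakpointProvider):
    def __init__(self) -> None:
        self._bps: dict = {}

    def init(self) -> None:
        # Reset provider state; a real provider would probe the target here.
        self._bps.clear()

    @property
    def bp_type(self) -> Target.BreakpointType:
        return Target.BreakpointType.HW

    @property
    def available_breakpoints(self) -> int:
        return 4 - len(self._bps)

    def can_support_address(self, addr: int) -> bool:
        return True

    def find_breakpoint(self, addr: int) -> Optional[Breakpoint]:
        return self._bps.get(addr)

    def set_breakpoint(self, addr: int) -> Optional[Breakpoint]:
        if addr in self._bps or self.available_breakpoints == 0:
            return None
        bp = Breakpoint(self)
        bp.addr = addr
        bp.enabled = True
        self._bps[addr] = bp
        return bp

    def remove_breakpoint(self, bp: Breakpoint) -> None:
        self._bps.pop(bp.addr, None)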
|
5c7165e686e8a17237c5df65fd37ab6c29c72af0
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/common/utils/nest_utils.py
|
2cdd1471b95308bc6b2c8c41bb99363f64011e90
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,667
|
py
|
nest_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilties for dealing with nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
def add_string_feature(key, values,
example):
example.features.feature[key].bytes_list.value.extend(values)
def add_int_feature(key, values,
example):
example.features.feature[key].int64_list.value.extend(values)
def add_float_feature(key, values,
example):
example.features.feature[key].float_list.value.extend(values)
def flat_dict_to_tf_example(inputs,
structure):
"""Convert a flat dictionary to a tf.Example.
Args:
inputs: A dictionary of flat numpy arrays.
structure: A nested structure of placeholders that have specified shapes.
Returns:
example: An example with the flattened inputs.
"""
flat_structure = nest_to_flat_dict(structure)
tf.nest.assert_same_structure(
nest1=flat_structure, nest2=inputs, check_types=False)
example = tf.train.Example()
dtype_map = {
tf.string: add_string_feature,
tf.int32: add_int_feature,
tf.int64: add_int_feature,
tf.float32: add_float_feature,
tf.float64: add_float_feature
}
for k, v in inputs.items():
placeholder = flat_structure[k]
assert placeholder.shape == v.shape
add_fn = dtype_map[placeholder.dtype]
add_fn(k, v.flatten(), example)
return example
def tf_example_to_structure(serialized_example,
structure):
"""Convert a serialized tf.Example into a nested structure of Tensors.
Args:
serialized_example: String tensor containing a serialized example.
structure: A nested structure of placeholders that have specified shapes.
Returns:
features: A nested structure of Tensors consistent with `structure`.
"""
flat_structure = nest_to_flat_dict(structure)
dtype_map = {
tf.string: tf.string,
tf.int32: tf.int64,
tf.int64: tf.int64,
tf.float32: tf.float32,
tf.float64: tf.float32
}
def _placeholder_to_feature(placeholder):
return tf.FixedLenFeature(
shape=np.prod(placeholder.shape, dtype=np.int32),
dtype=dtype_map[placeholder.dtype])
flat_feature_spec = {
k: _placeholder_to_feature(v) for k, v in flat_structure.items()
}
flat_features = tf.parse_single_example(serialized_example, flat_feature_spec)
flat_features = {
k: tf.reshape(tf.cast(flat_features[k], v.dtype), v.shape)
for k, v in flat_structure.items()
}
features = flat_dict_to_nest(flat_features, structure)
return features
def nest_to_flat_dict(nest):
"""Convert a nested structure into a flat dictionary.
Args:
nest: A nested structure.
Returns:
flat_dict: A dictionary with strings keys that can be converted back into
the original structure via `flat_dict_to_nest`.
"""
flat_sequence = tf.nest.flatten(nest)
return {str(k): v for k, v in enumerate(flat_sequence)}
def flat_dict_to_nest(flat_dict, structure):
"""Convert a nested structure into a flat dictionary.
Args:
flat_dict: A dictionary with string keys.
structure: A nested structure.
Returns:
nest: A nested structure that inverts `nest_to_flat_dict`.
"""
flat_sequence = [flat_dict[str(i)] for i in range(len(flat_dict))]
return tf.nest.pack_sequence_as(
structure=structure, flat_sequence=flat_sequence)
def assert_same(nest1, nest2):
"""Assert that both structures are equivalent.
This function is more strict than tf.nest.assert_same_structure since it also
requires that Tensors have the same dtype and shape.
Args:
nest1: A nested structure.
nest2: A nested structure.
"""
tf.nest.assert_same_structure(nest1, nest2)
for t1, t2 in zip(tf.nest.flatten(nest1), tf.nest.flatten(nest2)):
assert t1.dtype == t2.dtype
if isinstance(t1, tf.Tensor) and isinstance(t2, tf.Tensor):
assert t1.shape == t2.shape
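if __name__ == "__main__":
  # Hypothetical usage sketch (not part of the original module): round-trip a
  # nested structure through the flat-dict helpers defined above. The example
  # structure below is invented for illustration only.
  example_nest = {
      "ids": np.zeros([3], dtype=np.int64),
      "label": np.zeros([1], dtype=np.float32),
  }
  flat = nest_to_flat_dict(example_nest)  # keys are "0", "1", ... in flatten order
  rebuilt = flat_dict_to_nest(flat, example_nest)
  assert_same(example_nest, rebuilt)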
|
a03a98f94e0ee86c38b2d89b11b97d4cddc78f1c
|
9cfab77b9c362d70d79110bb330794455f07e37c
|
/evaluation/evaluation.py
|
e45d26e7fa2eca119a7823082737757f5ff8cc0e
|
[
"Apache-2.0"
] |
permissive
|
twitter-research/tgn
|
bf6148b8e94a282edc9ce1d1e673ef41193b0b21
|
d55bbe678acabb9fc3879c408fd1f2e15919667c
|
refs/heads/master
| 2023-07-07T15:08:17.161711
| 2023-06-21T14:40:17
| 2023-06-21T14:40:17
| 282,072,618
| 764
| 178
|
Apache-2.0
| 2023-06-21T14:40:18
| 2020-07-23T22:49:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,406
|
py
|
evaluation.py
|
import math
import numpy as np
import torch
from sklearn.metrics import average_precision_score, roc_auc_score
def eval_edge_prediction(model, negative_edge_sampler, data, n_neighbors, batch_size=200):
# Ensures the random sampler uses a seed for evaluation (i.e. we sample always the same
# negatives for validation / test set)
assert negative_edge_sampler.seed is not None
negative_edge_sampler.reset_random_state()
val_ap, val_auc = [], []
with torch.no_grad():
model = model.eval()
# While usually the test batch size is as big as it fits in memory, here we keep it the same
# size as the training batch size, since it allows the memory to be updated more frequently,
# and later test batches to access information from interactions in previous test batches
# through the memory
TEST_BATCH_SIZE = batch_size
num_test_instance = len(data.sources)
num_test_batch = math.ceil(num_test_instance / TEST_BATCH_SIZE)
for k in range(num_test_batch):
s_idx = k * TEST_BATCH_SIZE
e_idx = min(num_test_instance, s_idx + TEST_BATCH_SIZE)
sources_batch = data.sources[s_idx:e_idx]
destinations_batch = data.destinations[s_idx:e_idx]
timestamps_batch = data.timestamps[s_idx:e_idx]
edge_idxs_batch = data.edge_idxs[s_idx: e_idx]
size = len(sources_batch)
_, negative_samples = negative_edge_sampler.sample(size)
pos_prob, neg_prob = model.compute_edge_probabilities(sources_batch, destinations_batch,
negative_samples, timestamps_batch,
edge_idxs_batch, n_neighbors)
pred_score = np.concatenate([(pos_prob).cpu().numpy(), (neg_prob).cpu().numpy()])
true_label = np.concatenate([np.ones(size), np.zeros(size)])
val_ap.append(average_precision_score(true_label, pred_score))
val_auc.append(roc_auc_score(true_label, pred_score))
return np.mean(val_ap), np.mean(val_auc)
def eval_node_classification(tgn, decoder, data, edge_idxs, batch_size, n_neighbors):
pred_prob = np.zeros(len(data.sources))
num_instance = len(data.sources)
num_batch = math.ceil(num_instance / batch_size)
with torch.no_grad():
decoder.eval()
tgn.eval()
for k in range(num_batch):
s_idx = k * batch_size
e_idx = min(num_instance, s_idx + batch_size)
sources_batch = data.sources[s_idx: e_idx]
destinations_batch = data.destinations[s_idx: e_idx]
timestamps_batch = data.timestamps[s_idx:e_idx]
edge_idxs_batch = edge_idxs[s_idx: e_idx]
source_embedding, destination_embedding, _ = tgn.compute_temporal_embeddings(sources_batch,
destinations_batch,
destinations_batch,
timestamps_batch,
edge_idxs_batch,
n_neighbors)
pred_prob_batch = decoder(source_embedding).sigmoid()
pred_prob[s_idx: e_idx] = pred_prob_batch.cpu().numpy()
auc_roc = roc_auc_score(data.labels, pred_prob)
return auc_roc
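if __name__ == '__main__':
  # Hypothetical sketch (not part of the original script): the duck-typed
  # interfaces eval_edge_prediction expects from its arguments. DummyModel,
  # DummySampler and DummyData are stand-ins invented here for illustration.
  class DummyData:
    def __init__(self, n):
      self.sources = np.arange(n)
      self.destinations = np.arange(n)
      self.timestamps = np.arange(n, dtype=float)
      self.edge_idxs = np.arange(n)

  class DummySampler:
    seed = 0

    def reset_random_state(self):
      pass

    def sample(self, size):
      return np.zeros(size, dtype=int), np.zeros(size, dtype=int)

  class DummyModel(torch.nn.Module):
    def compute_edge_probabilities(self, sources, destinations, negatives,
                                   timestamps, edge_idxs, n_neighbors):
      size = len(sources)
      return torch.rand(size), torch.rand(size)

  ap, auc = eval_edge_prediction(DummyModel(), DummySampler(), DummyData(32),
                                 n_neighbors=10)
  print('AP: %.4f, AUC: %.4f' % (ap, auc))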
|
e7dfc19bffa77593eaef897a3e8850a3bfeff196
|
45d01a6c5fbf766ad4d996c044412dc2b268ef07
|
/autoimpute/imputations/series/default.py
|
4126e043ac97380638357fc9568aabfc8869bca5
|
[
"MIT"
] |
permissive
|
kearnz/autoimpute
|
2cf88d8cf4a1ab6b8b6579c8dca2ecd38eb1aaf9
|
6ef82663464aad187fd341fcace8e97bd0222aaf
|
refs/heads/master
| 2023-06-07T21:08:23.584459
| 2023-05-24T04:43:28
| 2023-05-24T04:43:28
| 168,429,609
| 245
| 27
|
MIT
| 2022-09-10T22:36:57
| 2019-01-30T23:09:47
|
Python
|
UTF-8
|
Python
| false
| false
| 15,652
|
py
|
default.py
|
"""This module implements default imputers used for series Imputer classes.
These Imputer classes serve as defaults within more advanced imputers. They
are flexible, and they allow users to quickly run imputations without getting
a runtime error as they would in sklearn if the data types in a dataset are
mixed. There are three default imputers at the moment: DefaultUnivarImputer,
DefaultTimeSeriesImputer and DefaultPredictiveImputer. Default imputers
inherit from DefaultBaseImputer.
"""
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
from sklearn.utils.validation import check_is_fitted
from autoimpute.imputations import method_names
from .pmm import PMMImputer
from .mean import MeanImputer
from .mode import ModeImputer
from .interpolation import InterpolateImputer
from .logistic_regression import MultinomialLogisticImputer
from .base import ISeriesImputer
methods = method_names
# pylint:disable=attribute-defined-outside-init
# pylint:disable=unnecessary-pass
# pylint:disable=dangerous-default-value
# pylint:disable=too-many-instance-attributes
class DefaultBaseImputer(ISeriesImputer):
"""Building blocks for the default imputers.
The DefaultBaseImputer is not a stand-alone class and thus serves no
purpose other than as a Parent to DefaultImputers. Therefore, the
DefaultBaseImputer should not be used directly unless creating a new
version of a DefaultImputer.
"""
def __init__(self, num_imputer, cat_imputer, num_kwgs, cat_kwgs):
"""Initialize the DefaultBaseImputer.
Args:
num_imputer (Imputer): valid Imputer for numerical data.
cat_imputer (Imputer): valid Imputer for categorical data.
num_kwgs (dict): Keyword args for numerical imputer.
cat_kwgs (dict): keyword args for categorical imputer.
Returns:
self. Instance of the class
"""
# INSTANCE ATTRIBUTES MUST BE IN ORDER THEY ARE VALIDATED WITH GET/SET
# --------------------------------------------------------------------
# Position of arguments in __init__ is essentially arbitrary
# But attribute must appear in proper order if using getters/setters
self.num_kwgs = num_kwgs
self.cat_kwgs = cat_kwgs
self.num_imputer = num_imputer
self.cat_imputer = cat_imputer
@property
def num_kwgs(self):
"""Property getter to return the value of num_kwgs."""
return self._num_kwgs
@property
def cat_kwgs(self):
"""Property getter to return the value of cat_kwgs."""
return self._cat_kwgs
@num_kwgs.setter
def num_kwgs(self, kwgs):
"""Validate the num_kwgs and set default properties."""
if not isinstance(kwgs, (type(None), dict)):
err = "num_kwgs must be dict of args used to init num_imputer."
raise ValueError(err)
self._num_kwgs = kwgs
@cat_kwgs.setter
def cat_kwgs(self, kwgs):
"""Validate the cat_kwgs and set default properties."""
if not isinstance(kwgs, (type(None), dict)):
err = "cat_kwgs must be dict of args used to init cat_imputer."
raise ValueError(err)
self._cat_kwgs = kwgs
@property
def num_imputer(self):
"""Property getter to return the value of the num imputer."""
return self._num_imputer
@property
def cat_imputer(self):
"""Property getter to return the value of the cat imputer."""
return self._cat_imputer
@num_imputer.setter
def num_imputer(self, imp):
"""Validate the num imputer and set default parameters.
Args:
imp (Imputer): must be a valid autoimpute Imputer
Raises:
ValueError: any Imputer class must end in Imputer
ValueError: Imputer must implement fit_impute
ValueError: argument not an instance of an Imputer
"""
        # try/except necessary because imp may not be a class (no __name__ attribute)
try:
# once imp confirmed class, error handling
cls_ = imp.__name__.endswith("Imputer")
if not cls_:
err = f"{imp} must be a class ending in Imputer"
raise ValueError(err)
# valid imputers must have `fit_impute` method
m = "fit_impute"
if not hasattr(imp, m):
err = f"Imputer must implement {m} method."
raise ValueError(err)
# if valid imputer, instantiate it with kwargs
# if kwargs contains improper args, imp will handle error
if self.num_kwgs is None:
self._num_imputer = imp()
else:
self._num_imputer = imp(**self.num_kwgs)
        # deal with imp that doesn't have a __name__ attribute (not a class)
except AttributeError as ae:
err = f"{imp} is not an instance of an Imputer"
raise ValueError(err) from ae
@cat_imputer.setter
def cat_imputer(self, imp):
"""Validate the cat imputer and set default parameters.
Args:
imp (Imputer): must be a valid autoimpute imputer
Raises:
ValueError: any imputer class must end in Imputer
ValueError: imputer must implement fit_impute
ValueError: argument not an instance of an Imputer
"""
# try necessary because imp could initially be anything
try:
# once imp confirmed class, error handling
cls_ = imp.__name__.endswith("Imputer")
if not cls_:
err = f"{imp} must be an Imputer class from autoimpute"
raise ValueError(err)
# valid imputers must have `fit_impute` method
m = "fit_impute"
if not hasattr(imp, m):
err = f"Imputer must implement {m} method."
raise ValueError(err)
# if valid imputer, instantiate it with kwargs
# if kwargs contains improper args, imp will handle error
if self.cat_kwgs is None:
self._cat_imputer = imp()
else:
self._cat_imputer = imp(**self.cat_kwgs)
except AttributeError as ae:
err = f"{imp} is not a valid Imputer"
raise ValueError(err) from ae
def fit(self, X, y):
"""Fit the Imputer to the dataset and determine the right approach.
Args:
X (pd.Series): Dataset to fit the imputer, or predictors
y (pd.Series): None, or dataset to fit predictors
Returns:
self. Instance of the class.
"""
# start off with stats blank
stats = {"param": None, "strategy": None}
# if y is None, fitting simply X. univariate method.
if y is None:
if is_numeric_dtype(X):
stats = {"param": self.num_imputer.fit(X, y),
"strategy": self.num_imputer.strategy}
if is_string_dtype(X):
stats = {"param": self.cat_imputer.fit(X, y),
"strategy": self.cat_imputer.strategy}
# if y is not None, fitting X to y. predictive method.
        if y is not None:
if is_numeric_dtype(y):
stats = {"param": self.num_imputer.fit(X, y),
"strategy": self.num_imputer.strategy}
if is_string_dtype(y):
stats = {"param": self.cat_imputer.fit(X, y),
"strategy": self.cat_imputer.strategy}
# return final stats
self.statistics_ = stats
return self
def impute(self, X):
"""Perform imputations using the statistics generated from fit.
The impute method handles the actual imputation. Missing values
in a given dataset are replaced with the respective mean from fit.
Args:
X (pd.Series): Dataset to impute missing data from fit.
Returns:
pd.Series -- imputed dataset.
"""
# check is fitted and delegate transformation to respective imputer
check_is_fitted(self, "statistics_")
imp = self.statistics_["param"]
# ensure that param is not none, which indicates time series column
if imp:
X_ = imp.impute(X)
return X_
def fit_impute(self, X, y):
"""Convenience method to perform fit and imputation in one go."""
return self.fit(X, y).impute(X)
class DefaultUnivarImputer(DefaultBaseImputer):
"""Impute missing data using default methods for univariate imputation.
This imputer is the default for univariate imputation. The imputer
determines how to impute based on the column type of each column in a
dataframe. The imputer can be used directly, but such behavior is
discouraged. DefaultUnivarImputer does not have the flexibility /
robustness of more complex imputers, nor is its behavior identical.
Preferred use is MultipleImputer(strategy="default univariate").
"""
# class variables
strategy = methods.DEFAULT_UNIVAR
def __init__(
self,
num_imputer=MeanImputer,
cat_imputer=ModeImputer,
num_kwgs=None,
cat_kwgs={"fill_strategy": "random"}
):
"""Create an instance of the DefaultUnivarImputer class.
The dataframe imputers delegate work to the DefaultUnivarImputer if
strategy="default univariate" The DefaultUnivarImputer then determines
how to impute numerical and categorical columns by default. It does so
by passing its arguments to the DefaultBaseImputer, which handles
validation and instantiation of numerical and categorical imputers.
Args:
num_imputer (Imputer, Optional): valid Imputer for numerical data.
Default is MeanImputer.
cat_imputer (Imputer, Optional): valid Imputer for categorical
data. Default is ModeImputer.
num_kwgs (dict, optional): Keyword args for numerical imputer.
Default is None.
cat_kwgs (dict, optional): keyword args for categorical imputer.
Default is {"fill_strategy": "random"}.
Returns:
self. Instance of class.
"""
# delegate to DefaultBaseImputer
DefaultBaseImputer.__init__(
self,
num_imputer=num_imputer,
cat_imputer=cat_imputer,
num_kwgs=num_kwgs,
cat_kwgs=cat_kwgs
)
def fit(self, X, y=None):
"""Defer fit to the DefaultBaseImputer."""
super().fit(X, y)
return self
def impute(self, X):
"""Defer transform to the DefaultBaseImputer."""
X_ = super().impute(X)
return X_
class DefaultTimeSeriesImputer(DefaultBaseImputer):
"""Impute missing data using default methods for time series.
This imputer is the default imputer for time series imputation. The
imputer determines how to impute based on the column type of each column
in a dataframe. The imputer can be used directly, but such behavior is
discouraged. DefaultTimeSeriesImputer does not have the flexibility /
robustness of more complex imputers, nor is its behavior identical.
Preferred use is MultipleImputer(strategy="default time").
"""
# class variables
strategy = methods.DEFAULT_TIME
def __init__(
self,
num_imputer=InterpolateImputer,
cat_imputer=ModeImputer,
num_kwgs={"fill_strategy": "linear"},
cat_kwgs={"fill_strategy": "random"}
):
"""Create an instance of the DefaultTimeSeriesImputer class.
The dataframe imputers delegate work to the DefaultTimeSeriesImputer
if strategy="default time". The DefaultTimeSeriesImputer then
determines how to impute numerical and categorical columns by default.
It does so by passing its arguments to the DefaultBaseImputer, which
handles validation and instantiation of default numerical and
categorical imputers.
Args:
num_imputer (Imputer, Optional): valid Imputer for numerical data.
Default is InterpolateImputer.
cat_imputer (Imputer, Optional): valid Imputer for categorical
data. Default is ModeImputer.
num_kwgs (dict, optional): Keyword args for numerical imputer.
Default is {"strategy": "linear"}.
cat_kwgs (dict, optional): keyword args for categorical imputer.
Default is {"fill_strategy": "random"}.
Returns:
self. Instance of class.
"""
DefaultBaseImputer.__init__(
self,
num_imputer=num_imputer,
cat_imputer=cat_imputer,
num_kwgs=num_kwgs,
cat_kwgs=cat_kwgs
)
def fit(self, X, y=None):
"""Defer fit to the DefaultBaseImputer."""
super().fit(X, y)
return self
def impute(self, X):
"""Defer transform to the DefaultBaseImputer."""
X_ = super().impute(X)
return X_
class DefaultPredictiveImputer(DefaultBaseImputer):
"""Impute missing data using default methods for prediction.
This imputer is the default imputer for the MultipleImputer class. When
an end-user does not supply a strategy, the DefaultPredictiveImputer
determines how to impute based on the column type of each column in a
dataframe. The imputer can be used directly, but such behavior is
discouraged. DefaultPredictiveImputer does not have the flexibility /
robustness of more complex imputers, nor is its behavior identical.
Preferred use is MultipleImputer(strategy="default predictive").
"""
# class variables
strategy = methods.DEFAULT_PRED
def __init__(
self,
num_imputer=PMMImputer,
cat_imputer=MultinomialLogisticImputer,
num_kwgs=None,
cat_kwgs=None
):
"""Create an instance of the DefaultPredictiveImputer class.
The dataframe imputers delegate work to DefaultPredictiveImputer if
strategy="default predictive" or no strategy given when class is
instantiated. The DefaultPredictiveImputer determines how to impute
numerical and categorical columns by default. It does so by passing
its arguments to the DefaultBaseImputer, which handles validation and
instantiation of default numerical and categorical imputers.
Args:
num_imputer (Imputer, Optional): valid Imputer for numerical data.
Default is PMMImputer.
cat_imputer (Imputer, Optional): valid Imputer for categorical
                data. Default is MultinomialLogisticImputer.
num_kwgs (dict, optional): Keyword args for numerical imputer.
Default is None.
cat_kwgs (dict, optional): keyword args for categorical imputer.
Default is None.
Returns:
self. Instance of class.
"""
# delegate to DefaultBaseImputer
DefaultBaseImputer.__init__(
self,
num_imputer=num_imputer,
cat_imputer=cat_imputer,
num_kwgs=num_kwgs,
cat_kwgs=cat_kwgs
)
def fit(self, X, y):
"""Defer fit to the DefaultBaseImputer."""
super().fit(X, y)
return self
def impute(self, X):
"""Defer transform to the DefaultBaseImputer."""
X_ = super().impute(X)
return X_
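if __name__ == "__main__":
    # Hypothetical sketch (not part of the original module): the imputer setters
    # on DefaultBaseImputer reject anything that is not an autoimpute Imputer
    # class, as described in their docstrings above.
    try:
        DefaultUnivarImputer(num_imputer=dict)  # dict is not an Imputer class
    except ValueError as err:
        print(f"rejected as expected: {err}")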
|