Dataset columns (one row per source file):

| column | dtype | values |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 4 to 721 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 91 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable (⌀) | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable (⌀) | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | lengths 3 to 113 |
| content | string | lengths 6 to 10.2M |
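
A minimal sketch of how rows with this schema could be consumed with the Hugging Face `datasets` library; the dataset id `user/python-code-dump` and the `train` split are placeholders, since the actual repository name is not given here.

```python
# Hedged sketch: stream rows with the schema above and keep small, non-vendored,
# non-generated Python files. Replace "user/python-code-dump" with the real dataset id.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train", streaming=True)

for row in ds:
    if (
        row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] < 10_000
    ):
        print(row["repo_name"], row["path"], row["star_events_count"])
        print(row["content"][:200])
        break
```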

repo_name: ryfeus/lambda-packs | path: /Tensorflow_LightGBM_Scipy_nightly/source/index.py | filename: index.py | extension: py | length_bytes: 77
blob_id: dd1739691d3ca4cfd195ff77c6eb486eff52bafe | directory_id: fbbe424559f64e9a94116a07eaaa555a01b0a7bb | content_id: 359f1907cc26256fc67235fd492d5e917570632b
snapshot_id: 6544adb4dec19b8e71d75c24d8ed789b785b0369 | revision_id: cabf6e4f1970dc14302f87414f170de19944bac2 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2022-12-07T16:18:52.475504 | revision_date: 2022-11-29T13:35:35 | committer_date: 2022-11-29T13:35:35 | gha_event_created_at: 2022-11-26T05:02:14 | gha_created_at: 2016-10-19T18:22:39
github_id: 71,386,735 | star_events_count: 1,283 | fork_events_count: 263
content:
import tensorflow
import lightgbm
def handler(event, context):
return 0

repo_name: demisto/content | path: /Packs/Sixgill-Darkfeed/Scripts/SixgillSearchIndicators/SixgillSearchIndicators.py | filename: SixgillSearchIndicators.py | extension: py | length_bytes: 1,192
blob_id: 55381a2ce301e272d1e44395624b8ecc1c2ba531 | directory_id: 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | content_id: 71ee5158567bf057f53858b053496ad531f3e2ef
snapshot_id: 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | revision_id: 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-09-04T00:02:25.618032 | revision_date: 2023-09-03T21:56:22 | committer_date: 2023-09-03T21:56:22 | gha_event_created_at: 2023-09-14T20:55:24 | gha_created_at: 2016-06-06T12:17:02
github_id: 60,525,392 | star_events_count: 1,023 | fork_events_count: 1,921
content:
import demistomock as demisto
from CommonServerPython import *
DEFAULT_SIZE = 50
def search_indicators(args):
keys = ['id', 'value', 'CustomFields', 'type', 'score', 'firstSeen', 'lastSeen',
'expiration', 'expirationStatus', 'sourceBrands', 'sourceInstances']
query = args.get('query', None)
if not query:
raise ValueError('Query not set!')
size = int(args.get('size', DEFAULT_SIZE))
indicators = demisto.executeCommand("findIndicators", {"query": query, 'size': size})
outputs = list()
if not isinstance(indicators, list) or len(indicators) < 1 or 'Contents' not in indicators[0]:
raise ValueError('No content')
for i in indicators[0]['Contents']:
oi = dict()
for k in i.keys():
if k in keys:
oi[k] = i[k]
outputs.append(oi)
return CommandResults(
outputs_prefix='FoundIndicators',
outputs_key_field='value',
outputs=outputs
)
def main(args):
try:
return_results(search_indicators(args))
except Exception as e:
return_error(f'Error : {str(e)}')
if __name__ in ('builtins', '__builtin__'):
main(demisto.args())

repo_name: linmingchih/HowtoSim_Script | path: /Dk_TanD.py | filename: Dk_TanD.py | extension: py | length_bytes: 2,627
blob_id: 3e0e87223b39b25e1bfbcf2e1af8b64b17a8ffcc | directory_id: 2be164d2b1e34553064b933d4015fae80c1aae18 | content_id: a4683b5a19738c9b722106721fd3bc4ffaa25120
snapshot_id: a8c14de9f1c70152c1676abb87b15b3d429d6eb9 | revision_id: 8f6abbcd9c2ad8c76e97357cd622f9f2ca87da7c | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: null | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-04-03T07:27:04.067901 | revision_date: 2023-03-19T09:35:53 | committer_date: 2023-03-19T09:35:53 | gha_event_created_at: 2021-07-27T09:58:01 | gha_created_at: 2019-02-01T02:19:16
github_id: 168,629,194 | star_events_count: 118 | fork_events_count: 100
content:
relative_permittivity='4.11483+0.0281238*ln((2.53303e+22+Freq*Freq)/(4.95658e+09+Freq*Freq))'
bulk_conductivity='1e-12+3.12919e-12*Freq*(atan(Freq/70403)-atan(Freq/1.59155e+11))'
import ScriptEnv, random, string
def calculate(relative_permittivity,bulk_conductivity):
from math import *
ln=log
F=[pow(10,i/10) for i in range(30, 130)]
x=relative_permittivity
y=bulk_conductivity
Dk=eval('[{} for Freq in F]'.format(x))
Cond=eval('[{} for Freq in F]'.format(y))
td=[Cond_p/(2*pi*Freq)/(Dk_p*8.8542e-12) for Cond_p, Freq, Dk_p in zip(Cond,F,Dk)]
with open("Dk_tanD.csv",'w') as f:
f.write('Freq, Dk, TanD\n')
for i,j,k in zip(F,Dk,td):
f.write(str(i)+', ' + str(j)+', '+str(k)+'\n')
generate_plot()
def generate_plot():
ScriptEnv.Initialize("Ansoft.ElectronicsDesktop")
oDesktop.RestoreWindow()
oProject = oDesktop.GetActiveProject()
oDesign = oProject.GetActiveDesign()
oModule = oDesign.GetModule("Solutions")
name= ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(6)])
oModule.ImportTable("Dk_tanD.csv", name, "Table", True, True, ["Freq", "Dk", "TanD"], [True, False, False])
oModule = oDesign.GetModule("ReportSetup")
oModule.CreateReport(name, "Modal Solution Data", "Rectangular Plot", name+" : Table", [],
[
"Tb(Freq):=" , ["All"]
],
[
"X Component:=" , "Tb(Freq)",
"Y Component:=" , ["Tb(Dk)","Tb(TanD)"]
], [])
oModule.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:Trace",
[
"NAME:PropServers",
name+":Tb(TanD)"
],
[
"NAME:ChangedProps",
[
"NAME:Y Axis",
"Value:=" , "Y2"
]
]
]
])
oModule.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:Scaling",
[
"NAME:PropServers",
name+":AxisX"
],
[
"NAME:ChangedProps",
[
"NAME:Axis Scaling",
"Value:=" , "Log"
]
]
]
])
calculate(relative_permittivity,bulk_conductivity)
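
The tanδ arithmetic in `calculate()` can be checked outside AEDT, where `ScriptEnv` and `oDesktop` are not importable; a minimal standard-library sketch that evaluates the same Dk, bulk conductivity, and tanδ = σ / (2π·f·ε0·Dk) at a single frequency point.

```python
# Standalone check of the Dk / tanD math used above, at one frequency point.
# ScriptEnv / oDesktop exist only inside AEDT, so the table import and plot are omitted.
from math import atan, log, pi

ln = log
Freq = 1e9  # 1 GHz sample point

Dk = 4.11483 + 0.0281238 * ln((2.53303e+22 + Freq * Freq) / (4.95658e+09 + Freq * Freq))
Cond = 1e-12 + 3.12919e-12 * Freq * (atan(Freq / 70403) - atan(Freq / 1.59155e+11))
tanD = Cond / (2 * pi * Freq) / (Dk * 8.8542e-12)

print(f"Freq={Freq:.3e} Hz  Dk={Dk:.4f}  tanD={tanD:.5f}")
```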

repo_name: allegro/ralph | path: /src/ralph/operations/migrations/0002_auto_20151218_1029.py | filename: 0002_auto_20151218_1029.py | extension: py | length_bytes: 1,772
blob_id: 6266a969cb65ab23e59c13d4da1182aa2ab19bf2 | directory_id: 3abc1fef99ac6ce0b845a1090fae7f6875fee729 | content_id: c1b7208de4551c52b0de7ededa017fe3059b9067
snapshot_id: 5ff9165a202e836061c99e8af20214e0d651622f | revision_id: b4a72356f527b1f12c7babd7465d2d7fa3ffb0d3 | branch_name: refs/heads/ng
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-09-02T01:13:43.672554 | revision_date: 2023-09-01T09:48:38 | committer_date: 2023-09-01T09:48:38 | gha_event_created_at: 2023-09-01T09:44:39 | gha_created_at: 2012-05-17T14:04:57
github_id: 4,359,038 | star_events_count: 1,970 | fork_events_count: 617
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
OPERATION_TYPES = (
(1, 'Change', []),
(101, 'Incident', []),
(201, 'Problem', []),
(301, 'Failure', [
(302, 'Hardware Failure', [
(303, 'Disk', []),
(304, 'Controller', []),
(305, 'RAM', []),
(306, 'Eth card', [
(307, 'Eth card 1Gb', []),
(308, 'Eth card 10Gb', []),
]),
(309, 'Management Module', []),
(310, 'Power supply', []),
(311, 'Fan', []),
(312, 'SFP', []),
(313, 'Motherboard', []),
(314, 'Firmware upgrade', []),
(315, 'Backplane', []),
])
]),
)
def load_operation(model, obj_id, name, parent, children):
obj = model.objects.create(
id=obj_id,
pk=obj_id,
name=name,
parent=parent,
**{'lft': 0, 'rght': 0, 'level': 0, 'tree_id': 0}
)
for child_id, child_name, child_children in children:
load_operation(model, child_id, child_name, obj, child_children)
def load_initial_data(apps, schema_editor):
OperationType = apps.get_model("operations", "OperationType")
for op_id, op_name, op_children in OPERATION_TYPES:
load_operation(OperationType, op_id, op_name, None, op_children)
def unload_initial_data(apps, schema_editor):
OperationType = apps.get_model("operations", "OperationType")
OperationType.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('operations', '0001_initial'),
]
operations = [
migrations.RunPython(
load_initial_data, reverse_code=unload_initial_data
),
]
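
The nested `OPERATION_TYPES` structure is easier to read when flattened; a small standard-library sketch (not part of the migration) that walks a trimmed copy of the same `(id, name, children)` tuples the way `load_operation` recurses over them.

```python
# Illustrative walk over the same nested (id, name, children) tuples used above,
# mirroring the recursion in load_operation without touching the database.
OPERATION_TYPES = (
    (1, 'Change', []),
    (301, 'Failure', [
        (302, 'Hardware Failure', [
            (303, 'Disk', []),
        ]),
    ]),
)

def walk(entries, parent=None):
    for obj_id, name, children in entries:
        print(f"{obj_id}: {name} (parent={parent})")
        walk(children, parent=name)

walk(OPERATION_TYPES)
# 1: Change (parent=None)
# 301: Failure (parent=None)
# 302: Hardware Failure (parent=Failure)
# 303: Disk (parent=Hardware Failure)
```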

repo_name: pyqtgraph/pyqtgraph | path: /tests/opengl/items/common.py | filename: common.py | extension: py | length_bytes: 211
blob_id: 77aee86a09cc03ed876a131cad0473b103de9be8 | directory_id: f1872915f044e9bc8d6622d529535441ea8aec6a | content_id: 3e6e078753230dc5972d4f19dab027234d872ee5
snapshot_id: 5dc14ddd513f4f3fdd0e834aba720e61b122c886 | revision_id: f261280905a74f6cae4a43e39fa1732635b25c63 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: NOASSERTION | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-30T23:24:30.305478 | revision_date: 2023-08-29T00:35:59 | committer_date: 2023-08-29T00:35:59 | gha_event_created_at: 2023-09-14T13:40:51 | gha_created_at: 2013-09-12T07:18:21
github_id: 12,777,496 | star_events_count: 3,432 | fork_events_count: 1,184
content:
from pyqtgraph.opengl.GLGraphicsItem import GLGraphicsItem
def ensure_parentItem(parent: GLGraphicsItem, child: GLGraphicsItem):
assert child in parent.childItems()
assert parent is child.parentItem()

repo_name: chromium/chromium | path: /tools/perf/cli_tools/tbmv3/trace_downloader.py | filename: trace_downloader.py | extension: py | length_bytes: 4,649
blob_id: 83c930c47d120ff3a6ead0adae79c796799a457c | directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda | content_id: f6f0c3fed14b24f73bd979a6b378ea86dc06be6d
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | branch_name: refs/heads/main
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause | gha_language: null | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-24T00:35:12.585945 | revision_date: 2023-08-23T22:01:11 | committer_date: 2023-08-23T22:01:11 | gha_event_created_at: 2023-09-10T23:44:27 | gha_created_at: 2018-02-05T20:55:32
github_id: 120,360,765 | star_events_count: 17,408 | fork_events_count: 7,102
content:
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from py_utils import cloud_storage
_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_TRACE_DIR = os.path.join(_SCRIPT_DIR, 'traces')
HTML_URL_PREFIX = ('https://storage.cloud.google.com/chrome-telemetry-output/')
def _GetSubpathInBucket(html_url):
"""Returns the path minus the HTML_URL_PREFIX.
Given https://storage.../chrome-telemetry-output/foo/bar/trace.html,
it returns foo/bar/trace.html."""
if not html_url.startswith(HTML_URL_PREFIX):
raise Exception('Html trace url must start with %s' % HTML_URL_PREFIX)
return html_url.replace(HTML_URL_PREFIX, "")
def _GetProtoTraceLinkFromTraceEventsDir(link_prefix):
"""Returns the first proto trace in |link_prefix|/trace/traceEvents/"""
proto_link_prefix = '/'.join([link_prefix, 'trace/traceEvents/**'])
try:
for link in cloud_storage.List(cloud_storage.TELEMETRY_OUTPUT,
proto_link_prefix):
if link.endswith('.pb.gz') or link.endswith('.pb'):
return link[1:] # Strip the initial '/'.
except cloud_storage.NotFoundError as e:
# This directory doesn't exist at all.
raise cloud_storage.NotFoundError('No URLs match the prefix %s: %s' %
(proto_link_prefix, str(e)))
# The directory exists, but no proto trace found.
raise cloud_storage.NotFoundError(
'Proto trace not found in cloud storage. Path: %s.' % proto_link_prefix)
def GetFileExtension(file_path):
"""Given foo/bar/baz.pb.gz, returns '.pb.gz'."""
# Get the filename only because the directory names can contain "." like
# "v8.browsing".
filename = file_path.split('/')[-1]
first_dot_index = filename.find('.')
if first_dot_index == -1:
return ''
return filename[first_dot_index:]
def GetLocalTraceFileName(html_url):
"""Returns a local filename derived from the html trace url.
Given https://storage.../chrome-telemetry-output/foo/bar/trace.html, it
returns foo_bar_trace as the local filename. The filename does not contain
extensions. It's up to the caller to add .html or .pb etc."""
subpath = _GetSubpathInBucket(html_url)
extension = GetFileExtension(subpath)
no_extension_subpath = subpath[:-len(extension)]
return '_'.join(no_extension_subpath.split('/'))
def FindProtoTracePath(html_url):
"""
Finds the proto trace path given a html trace url.
In the simple case foo/bar/trace.pb is the proto trace for foo/bar/trace.html.
But sometimes that's not available so we have to look for a .pb.gz file in a
special directory."""
subpath = _GetSubpathInBucket(html_url)
if subpath.endswith('trace.html'):
proto_path = subpath.replace('trace.html', 'trace.pb')
if cloud_storage.Exists(cloud_storage.TELEMETRY_OUTPUT, proto_path):
return proto_path
proto_path += '.gz'
if cloud_storage.Exists(cloud_storage.TELEMETRY_OUTPUT, proto_path):
return proto_path
directory_path = '/'.join(subpath.split('/')[:-1])
return _GetProtoTraceLinkFromTraceEventsDir(directory_path)
def DownloadHtmlTrace(html_url, download_dir=DEFAULT_TRACE_DIR):
"""Downloads html trace given the url. Returns local path.
Skips downloading if file was already downloaded once."""
local_filename = os.path.join(download_dir, GetLocalTraceFileName(html_url))
local_path = local_filename + '.html'
if os.path.exists(local_path):
logging.info('%s already downloaded. Skipping.' % local_path)
return local_path
remote_path = _GetSubpathInBucket(html_url)
if not cloud_storage.Exists(cloud_storage.TELEMETRY_OUTPUT, remote_path):
raise cloud_storage.NotFoundError(
'HTML trace %s not found in cloud storage.' % html_url)
cloud_storage.Get(cloud_storage.TELEMETRY_OUTPUT, remote_path, local_path)
return local_path
def DownloadProtoTrace(html_url, download_dir=DEFAULT_TRACE_DIR):
"""Downloads the associated proto trace for html trace url. Returns path.
Skips downloading if file was already downloaded once."""
local_filename = os.path.join(download_dir, GetLocalTraceFileName(html_url))
for local_path in [local_filename + '.pb', local_filename + '.pb.gz']:
if os.path.exists(local_path):
logging.info('%s already downloaded. Skipping.' % local_path)
return local_path
remote_path = FindProtoTracePath(html_url)
extension = GetFileExtension(remote_path)
local_path = local_filename + extension
cloud_storage.Get(cloud_storage.TELEMETRY_OUTPUT, remote_path, local_path)
return local_path
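
The two pure path helpers are easiest to follow from concrete values; a short usage sketch, assuming the file above is importable as `trace_downloader` (it pulls in Catapult's `py_utils.cloud_storage` at import time).

```python
# Illustrates GetFileExtension and GetLocalTraceFileName with the URL format
# described in the docstrings. Importing trace_downloader requires Catapult's
# py_utils package to be on sys.path.
import trace_downloader as td

url = ("https://storage.cloud.google.com/chrome-telemetry-output/"
       "foo/bar/trace.html")

print(td.GetFileExtension("foo/bar/baz.pb.gz"))   # -> ".pb.gz"
print(td.GetFileExtension("v8.browsing/trace"))   # -> "" (the dot is in the directory name only)
print(td.GetLocalTraceFileName(url))              # -> "foo_bar_trace"
```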

repo_name: CiscoTestAutomation/genieparser | path: /src/genie/libs/parser/iosxe/tests/ShowMacsecInterface/cli/equal/golden_output_3_expected.py | filename: golden_output_3_expected.py | extension: py | length_bytes: 599
blob_id: 1d35f1aa07311d7750d8c92a5190adff752f71ef | directory_id: 5ef6c8d47864f471e26b9902d61f8c687e941f05 | content_id: 5a285a43b52fd922042d502b2ea1ea6430a37bfa
snapshot_id: 169c196558f1c1a0f0d10650876096f993224917 | revision_id: b531eff760b2e44cd69d7a2716db6f866907c239 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-09-03T08:56:18.831340 | revision_date: 2023-08-29T22:32:02 | committer_date: 2023-08-29T22:32:02 | gha_event_created_at: 2023-08-29T22:32:04 | gha_created_at: 2018-04-30T16:51:50
github_id: 131,621,824 | star_events_count: 247 | fork_events_count: 409
content:
expected_output = {
"macsec-data": {
"status": "disabled"
},
"capabilities": {
"icv-length": "16",
"data-length-change-supported": "yes",
"max-rx-sa": "32",
"max-tx-sa": "32",
"max-rx-sc": "16",
"max-tx-sc": "16",
"validate-frames": "disabled",
"pn-threshold-notification-support": "Yes",
"ciphers-supported": [
"GCM-AES-128",
"GCM-AES-256",
"GCM-AES-XPN-128",
"GCM-AES-XPN-256",
],
},
"access-control": "must secure",
}

repo_name: ralphwetzel/theonionbox | path: /theonionbox/tob/system/darwin/__init__.py | filename: __init__.py | extension: py | length_bytes: 8,068
blob_id: 36bb81b7f88ac084b01f9f36c00ff458cb80541f | directory_id: e5dd9568e49cc88d13e4a1255ca5a7763feafb72 | content_id: f5e165219e0345ea2d92e8600c23a5f55ca573c4
snapshot_id: 21bc8142394a55492ca82dcd1430d22366703bcb | revision_id: 9812fce48153955e179755ea7a58413c3bee182f | branch_name: refs/heads/master
detected_licenses: ["LicenseRef-scancode-warranty-disclaimer", "MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: JavaScript | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-09T09:16:21.665677 | revision_date: 2020-01-19T16:43:13 | committer_date: 2020-01-19T16:43:13 | gha_event_created_at: 2023-07-26T06:46:51 | gha_created_at: 2015-12-26T20:20:52
github_id: 48,622,560 | star_events_count: 130 | fork_events_count: 23
content:
from typing import Optional, Callable
from datetime import datetime, timedelta
import logging
import re
import subprocess
import time
from .. import BaseSystem
from .osxtemp import Temperature, Sensors, Units
from .systray import Icon
class Darwin(BaseSystem):
temp_function = None
def __init__(self):
super(Darwin, self).__init__()
self.__ntp = None
self.__uptime = None
@property
def system(self):
return 'Darwin'
@property
def temperature(self) -> Optional[float]:
if self.temp_function is None:
try:
self.temp_function = Temperature(Sensors.CPU_0_PROXIMITY, Units.CELSIUS)
except OSError:
log = logging.getLogger('theonionbox')
log.debug('macOSX SMC access library not found. Please check README for further instructions.')
if self.temp_function is not None:
try:
return float(self.temp_function())
except:
pass
return None
@property
def uptime(self) -> Optional[str]:
if self.__uptime is None:
try:
uptime = subprocess.check_output('uptime', shell=True)
uptime = uptime.decode("utf-8")
# uptime return format is ... complex!
# 17:35 up 5:10, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 14 mins, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 1 min, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 7 days, 5:10, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 7 days, 14 mins, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 7 days, 1 min, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 17 days, 5:10, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 17 days, 14 mins, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 17 days, 1 min, 1 user, load averages: 4.03 2.47 1.97
# 17:35 up 1 day, 5:10, 1 user, load averages: 4.03 2.47 1.97
uptime = re.findall('(\d+:\d+)(?: {1,2}up {1,2})(?:(\d+)(?: days?, ))?(?:(\d+:\d+)|(?:(\d+)(?: mins?))),', uptime)
# we just expect one match!
if len(uptime) == 1:
uptime = uptime[0]
except Exception as exc:
pass
else:
# Uptime RegEx tuple: (Timestamp, Days, hours:mins, mins)
# hours:mins and mins are mutually exclusive!
if len(uptime) == 4:
(ts, days, hours, mins) = uptime
if hours != '':
hours = hours.split(':')
mins = hours[1]
hours = hours[0]
days = days or '0'
hours = ('00{}'.format(hours))[-2:]
mins = ('00{}'.format(mins))[-2:]
its_now = datetime.fromtimestamp(time.time())
upt_diff = timedelta(days=int(days),
hours=int(hours),
minutes=int(mins))
upt = its_now - upt_diff
self.__uptime = upt.strftime('%Y-%m-%d %H:%M')
return self.__uptime
@property
def ntp(self) -> Optional[str]:
if self.__ntp is None:
# find potential interfaces
try:
networksetup = subprocess.check_output(['networksetup', '-listallhardwareports'])
networksetup = networksetup.decode("utf-8")
# Hardware Port: Wi-Fi
# Device: en0
# Ethernet Address: ...
#
# Hardware Port: Bluetooth PAN
# Device: en6
# Ethernet Address: ...
#
# Hardware Port: Thunderbolt 1
# Device: en3
# Ethernet Address: ...
#
# ...
regex = r'Device: (.+)'
interfaces = re.findall(regex, networksetup)
except subprocess.CalledProcessError:
return None
# check if there's lease information for those interfaces
for interface in interfaces:
try:
ipconfig = subprocess.check_output(['ipconfig', 'getpacket', interface])
ipconfig = ipconfig.decode("utf-8")
# op = BOOTREPLY
# htype = 1
# flags = 0
# hlen = 6
# hops = 0
# xid = ...
# secs = 2
# ciaddr = 0.0.0.0
# yiaddr = 192.168.178.xx
# siaddr = 192.168.178.1
# giaddr = 0.0.0.0
# chaddr = ...
# sname =
# file =
# options:
# Options count is 12
# dhcp_message_type (uint8): ACK 0x5
# server_identifier (ip): 192.168.178.1
# lease_time (uint32): 0x112380
# renewal_t1_time_value (uint32): 0x891c0
# rebinding_t2_time_value (uint32): 0xeff10
# subnet_mask (ip): 255.255.255.0
# router (ip_mult): {192.168.178.1}
# domain_name_server (ip_mult): {192.168.178.1}
# domain_name (string): ...
# broadcast_address (ip): 192.168.178.255
# network_time_protocol_servers (ip_mult): {192.168.178.1, 192.168.178.2} <===
# end (none):
regex = r"network_time_protocol_servers \(ip_mult\): \{((?:\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3},? ?)+)\}"
ntp_servers = re.findall(regex, ipconfig)
# 192.168.178.1, 192.168.178.2
if len(ntp_servers) > 0:
regex = r"(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})(?:,? ?)"
ntps = re.findall(regex, ntp_servers[0])
if len(ntps) > 0:
self.__ntp = ntps[0]
break
except Exception:
continue
return self.__ntp
# This is Test code!
def run_with_icon(self, launch, shutdown):
from . import Icon
import pystray
from functools import partial
import os
def on_openBox(icon, item, self):
os.system(f"open /Applications/Safari.app {self.url}")
menu = pystray.Menu(
pystray.MenuItem('Show TheOnionBox...', partial(on_openBox, self=self))
)
icon = Icon('The Onion Box', menu=menu)
from PIL import Image, ImageDraw
def create_image():
# Generate an image and draw a pattern
width = 41
height = 41
color1 = 0x000000
color2 = 0xffffff
image = Image.new('RGB', (width, height), color1)
dc = ImageDraw.Draw(image)
dc.rectangle(
(width // 2, 0, width, height // 2),
fill=color2)
dc.rectangle(
(0, height // 2, width // 2, height),
fill=color2)
return image
icon.icon = create_image()
# Prevent app from showing up in the dock
# https://stackoverflow.com/questions/4345102/how-to-hide-application-icon-from-mac-os-x-dock
from AppKit import NSBundle
bundle = NSBundle.mainBundle()
info = bundle.localizedInfoDictionary() or bundle.infoDictionary()
info['LSUIElement'] = '1'
def run_call(icon):
if icon is not None:
icon.visible = True
launch()
icon.run(run_call, shutdown)
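
The uptime-parsing regex is the densest part of `Darwin.uptime`; a standalone, standard-library sketch that runs the same pattern against one of the sample lines from the comments and shows the `(timestamp, days, hours:mins, mins)` tuple the method then unpacks.

```python
# Standalone check of the uptime-parsing regex used in Darwin.uptime.
import re

sample = "17:35 up 7 days, 5:10, 1 user, load averages: 4.03 2.47 1.97"
pattern = r'(\d+:\d+)(?: {1,2}up {1,2})(?:(\d+)(?: days?, ))?(?:(\d+:\d+)|(?:(\d+)(?: mins?))),'

matches = re.findall(pattern, sample)
print(matches)  # [('17:35', '7', '5:10', '')] -> (timestamp, days, hours:mins, mins)
```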

repo_name: Azure/azure-cli-extensions | path: /src/networkcloud/azext_networkcloud/tests/latest/test_rack.py | filename: test_rack.py | extension: py | length_bytes: 2,989
blob_id: d16633773505edf72490ed3588bd5389cfefc749 | directory_id: c50e7eb190802d7849c0d0cea02fb4d2f0021777 | content_id: 8bd9116e4f54f3da60a5b9e1632a4f0a457e579f
snapshot_id: c1615b19930bba7166c282918f166cd40ff6609c | revision_id: b8c2cf97e991adf0c0a207d810316b8f4686dc29 | branch_name: refs/heads/main
detected_licenses: ["LicenseRef-scancode-generic-cla", "MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-24T12:40:15.528432 | revision_date: 2023-08-24T09:17:25 | committer_date: 2023-08-24T09:17:25 | gha_event_created_at: 2023-09-14T10:48:57 | gha_created_at: 2017-10-11T16:27:31
github_id: 106,580,024 | star_events_count: 336 | fork_events_count: 1,226
content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,unnecessary-pass,unused-argument
"""
Rack test scenarios
"""
from azure.cli.testsdk import ResourceGroupPreparer, ScenarioTest
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from .config import CONFIG
def setup_scenario1(test):
"""Env setup_scenario1"""
pass
def cleanup_scenario1(test):
"""Env cleanup_scenario1"""
pass
def call_scenario1(test):
"""# Testcase: scenario1"""
setup_scenario1(test)
step_update(
test,
checks=[
test.check("tags", "{tagsUpdate}"),
test.check("provisioningState", "Succeeded"),
],
)
step_show(test, checks=[])
step_list_subscription(test)
step_list_resource_group(test, checks=[])
cleanup_scenario1(test)
def step_show(test, checks=None):
"""Rack show operation"""
if checks is None:
checks = []
test.cmd(
"az networkcloud rack show --name {name} " "--resource-group {rg}",
checks=checks,
)
def step_list_resource_group(test=None, checks=None):
"""Rack list by resource group operation"""
if checks is None:
checks = []
test.cmd("az networkcloud rack list --resource-group {rg}")
@AllowLargeResponse
def step_list_subscription(test):
"""Rack list by subscription operation"""
test.cmd("az networkcloud rack list")
def step_update(test, checks=None):
"""Rack update operation"""
if checks is None:
checks = []
test.cmd(
"az networkcloud rack update --name {name} "
"--rack-location {rackLocation} "
"--rack-serial-number {serialNumber} "
"--tags {tagsUpdate} --resource-group {rg}"
)
# As Rack is a hydrated resource, it won't be provisioned in a testing rg
# instead, we will use a resource created as a part of cluster deployment for testing
class RackScenarioTest(ScenarioTest):
"""Rack scenario test"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.kwargs.update(
{
"name": CONFIG.get("RACK", "name"),
"location": CONFIG.get("RACK", "location"),
"rackLocation": CONFIG.get("RACK", "rack_location"),
"rg": CONFIG.get("RACK", "resource_group"),
"serialNumber": CONFIG.get("RACK", "serial_number"),
"tags": CONFIG.get("RACK", "tags"),
"tagsUpdate": CONFIG.get("RACK", "tags_update"),
}
)
def test_rack_scenario1(self):
"""test scenario for Rack read and update operations"""
call_scenario1(self)

repo_name: mccdaq/daqhats | path: /daqhats/hats.py | filename: hats.py | extension: py | length_bytes: 12,724
blob_id: 34ffb95470460cd55f44a47d72c2af92beca5e36 | directory_id: e338fad18e9bdd6f0252c9306b05ad7b336c2ecd | content_id: 4a80889e0c8b9e1007731ad82e37bcaf7524b4dc
snapshot_id: fd22172bf89adff9e2f6d2c4a9198c9d39918ed7 | revision_id: e6d96c8fb621c83696536a037f1c4fa373c46068 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: NOASSERTION | gha_language: C | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2022-12-01T16:47:45.491181 | revision_date: 2022-11-18T15:05:17 | committer_date: 2022-11-18T15:05:17 | gha_event_created_at: 2022-06-24T18:42:34 | gha_created_at: 2018-08-08T16:57:41
github_id: 144,043,320 | star_events_count: 112 | fork_events_count: 83
content:
"""
Wraps the global methods from the MCC Hat library for use in Python.
"""
from collections import namedtuple
from ctypes import cdll, Structure, c_ubyte, c_ushort, c_char, c_int, POINTER, \
CFUNCTYPE, c_void_p
from enum import IntEnum, unique
_HAT_CALLBACK = None
@unique
class HatIDs(IntEnum):
"""Known MCC HAT IDs."""
ANY = 0 #: Match any MCC ID in :py:func:`hat_list`
MCC_118 = 0x0142 #: MCC 118 ID
MCC_128 = 0x0146 #: MCC 128 ID
MCC_134 = 0x0143 #: MCC 134 ID
MCC_152 = 0x0144 #: MCC 152 ID
MCC_172 = 0x0145 #: MCC 172 ID
@unique
class TriggerModes(IntEnum):
"""Scan trigger input modes."""
RISING_EDGE = 0 #: Start the scan on a rising edge of TRIG.
FALLING_EDGE = 1 #: Start the scan on a falling edge of TRIG.
ACTIVE_HIGH = 2 #: Start the scan any time TRIG is high.
ACTIVE_LOW = 3 #: Start the scan any time TRIG is low.
class OptionFlags(IntEnum):
"""Scan / read option flags. See individual methods for detailed
descriptions."""
DEFAULT = 0x0000 #: Use default behavior.
NOSCALEDATA = 0x0001 #: Read / write unscaled data.
NOCALIBRATEDATA = 0x0002 #: Read / write uncalibrated data.
EXTCLOCK = 0x0004 #: Use an external clock source.
EXTTRIGGER = 0x0008 #: Use an external trigger source.
CONTINUOUS = 0x0010 #: Run until explicitly stopped.
TEMPERATURE = 0x0020 #: Return temperature (MCC 134)
# exception class
class HatError(Exception):
"""
Exceptions raised for MCC DAQ HAT specific errors.
Args:
address (int): the address of the board that caused the exception.
value (str): the exception description.
"""
def __init__(self, address, value):
super(HatError, self).__init__(value)
self.address = address
self.value = value
def __str__(self):
return "Addr {}: ".format(self.address) + self.value
# HAT info structure class
class _Info(Structure): # pylint: disable=too-few-public-methods
_fields_ = [("address", c_ubyte),
("id", c_ushort),
("version", c_ushort),
("product_name", c_char * 256)]
# Callback function class
class HatCallback(object):
"""
DAQ HAT interrupt callback function class.
This class handles passing Python functions to the shared library as a
callback. It stores the user data internally and passes it to the callback
function to avoid issues with passing the object through the library.
The callback function should have a single argument (optional) that is a
Python object the user provides to :py:func:`interrupt_callback_enable` that
will be passed to the callback function whenever it is called.
Args:
function (function): the function to be called when an interrupt occurs.
"""
def __init__(self, function):
"""
Store the function and create the CFUNCTYPE.
"""
if not callable(function):
raise TypeError("Argument 1 must be a function or method.")
self.function = function
self.cbfunctype = CFUNCTYPE(None)
self.cbfunc = None
self.user_data = None
def get_callback_func(self):
"""
Create a wrapper function without the self argument since that can't
get passed to the library function, and assign it to a variable to
avoid it getting garbage collected.
"""
def func():
"""
Function wrapper.
"""
self.handle_callback()
self.cbfunc = self.cbfunctype(func)
return self.cbfunc
def handle_callback(self):
"""
This is directly called from the interrupt thread. It calls the user's
callback, passing the user_data object that gets set with
interrupt_callback_enable().
"""
self.function(self.user_data)
def _load_daqhats_library():
"""
Load the library
"""
libname = 'libdaqhats.so.1'
try:
lib = cdll.LoadLibrary(libname)
except: # pylint: disable=bare-except
lib = 0
return lib
def hat_list(filter_by_id=0):
"""
Return a list of detected DAQ HAT boards.
Scans certain locations for information from the HAT EEPROMs. Verifies the
contents are valid HAT EEPROM contents and returns a list of namedtuples
containing information on the HAT. Info will only be returned for DAQ HATs.
The EEPROM contents are stored in /etc/mcc/hats when using the
daqhats_read_eeproms tool, or in /proc/device-tree in the case of a single
HAT at address 0.
Args:
filter_by_id (int): If this is :py:const:`HatIDs.ANY` return all DAQ
HATs found. Otherwise, return only DAQ HATs with ID matching this
value.
Returns:
list: A list of namedtuples, the number of elements match the number of
DAQ HATs found. Each namedtuple will contain the following field names:
* **address** (int): device address
* **id** (int): device product ID, identifies the type of DAQ HAT
* **version** (int): device hardware version
* **product_name** (str): device product name
"""
_libc = _load_daqhats_library()
if _libc == 0:
return []
_libc.hat_list.argtypes = [c_ushort, POINTER(_Info)]
_libc.hat_list.restype = c_int
# find out how many structs we need
count = _libc.hat_list(filter_by_id, None)
if count == 0:
return []
# allocate array of Info structs
my_info = (_Info * count)()
# get the info
count = _libc.hat_list(filter_by_id, my_info)
# create the list of dictionaries to return
my_list = []
hat_info = namedtuple('HatInfo',
['address', 'id', 'version', 'product_name'])
for item in my_info:
info = hat_info(
address=item.address,
id=item.id,
version=item.version,
product_name=item.product_name.decode('ascii'))
my_list.append(info)
return my_list
def interrupt_state():
"""
Read the current DAQ HAT interrupt status
Returns the status of the interrupt signal, True if active or False if
inactive. The signal can be shared by multiple DAQ HATs so the status of
each board that may generate an interrupt must be read and the interrupt
source(s) cleared before the interrupt will become inactive.
This function only applies when using devices that can generate an
interrupt:
* MCC 152
Returns:
bool: The interrupt status.
"""
_libc = _load_daqhats_library()
if _libc == 0:
return False
_libc.hat_interrupt_state.argtypes = []
_libc.hat_interrupt_state.restype = c_int
# get the info
state = _libc.hat_interrupt_state()
return state == 1
def wait_for_interrupt(timeout):
"""
Wait for an interrupt from a DAQ HAT to occur.
Pass a timeout in seconds. Pass -1 to wait forever or 0 to return
immediately. If the interrupt has not occurred before the timeout elapses
the function will return False.
This function only applies when using devices that can generate an
interrupt:
* MCC 152
Returns:
bool: The interrupt status - True = interrupt active, False = interrupt
inactive.
"""
_libc = _load_daqhats_library()
if _libc == 0:
return False
_libc.hat_wait_for_interrupt.argtypes = [c_int]
_libc.hat_wait_for_interrupt.restype = c_int
if timeout == -1:
timeout_ms = -1
elif timeout == 0:
timeout_ms = 0
else:
timeout_ms = timeout * 1000
state = _libc.hat_wait_for_interrupt(timeout_ms)
return state == 1
def interrupt_callback_enable(callback, user_data):
"""
Enable an interrupt callback function.
Set a function that will be called when a DAQ HAT interrupt occurs.
The function will be called when the DAQ HAT interrupt signal becomes
active, and cannot be called again until the interrupt signal becomes
inactive. Active sources become inactive when manually cleared (such as
reading the digital I/O inputs or clearing the interrupt enable.) If not
latched, an active source also becomes inactive when the value returns to
the original value (the value at the source before the interrupt was
generated.)
There may only be one callback function at a time; if you call this
when a function is already set as the callback function then it will be
replaced with the new function and the old function will no longer be called
if an interrupt occurs. The data argument to this function will be passed
to the callback function when it is called.
The callback function must have the form "callback(user_data)". For
example: ::
def my_function(data):
# This is my callback function.
print("The interrupt occurred, and returned {}.".format(data))
data[0] += 1
value = [0]
interrupt_enable_callback(my_function, value)
In this example *my_function()* will be called when the interrupt occurs,
and the list *value* will be passed as the user_data. Inside the callback it
will be received as *data*, but will still be the same object so any changes
made will be present in the original *value*. Every time the interrupt
occurs *value[0]* will be incremented and a higher number will be printed.
An integer was not used for *value* because integers are immutable in Python
so the original *value* would never change.
The callback may be disabled with :py:func:`interrupt_callback_disable`.
This function only applies when using devices that can generate an
interrupt:
* MCC 152
Args:
callback (callback function): The callback function.
user_data (object) Optional Python object or data to pass to the
callback function.
Raises:
Exception: Internal error enabling the callback.
"""
_libc = _load_daqhats_library()
if _libc == 0:
return
# callback must be an instance of HatCallback; legacy code may already
# encapsulate it, so handle both cases
if isinstance(callback, HatCallback):
my_callback = callback
else:
my_callback = HatCallback(callback)
# function argtype is provided by callback class
_libc.hat_interrupt_callback_enable.argtypes = [my_callback.cbfunctype,
c_void_p]
_libc.hat_interrupt_callback_enable.restype = c_int
# save the user data in the HatCallback object
my_callback.user_data = user_data
# pass the callback class handler function and void * to the library
if (_libc.hat_interrupt_callback_enable(my_callback.get_callback_func(),
None) != 0):
raise Exception("Could not enable callback function.")
# save reference so it isn't garbage collected
global _HAT_CALLBACK # pylint: disable=global-statement
_HAT_CALLBACK = my_callback
def interrupt_callback_disable():
"""
Disable interrupt callbacks.
Raises:
Exception: Internal error disabling the callback.
"""
_libc = _load_daqhats_library()
if _libc == 0:
return
_libc.hat_interrupt_callback_disable.argtypes = []
_libc.hat_interrupt_callback_disable.restype = c_int
if _libc.hat_interrupt_callback_disable() != 0:
raise Exception("Could not disable callback function.")
class Hat(object): # pylint: disable=too-few-public-methods
"""
DAQ HAT base class.
Args:
address (int): board address, must be 0-7.
Raises:
ValueError: the address is invalid.
"""
_RESULT_SUCCESS = 0
_RESULT_BAD_PARAMETER = -1
_RESULT_BUSY = -2
_RESULT_TIMEOUT = -3
_RESULT_LOCK_TIMEOUT = -4
_RESULT_INVALID_DEVICE = -5
_RESULT_RESOURCE_UNAVAIL = -6
_RESULT_COMMS_FAILURE = -7
_RESULT_UNDEFINED = -10
def __init__(self, address=0):
"""Initialize the class. Address must be 0-7."""
self._initialized = False
self._address = 0
# max address value is 7
if address in range(8):
self._address = address
else:
raise ValueError("Invalid address {}. Must be 0-7.".format(address))
self._lib = _load_daqhats_library()
if self._lib == 0:
raise Exception("daqhats shared library is not installed.")
self._initialized = True
return
def address(self):
"""Return the device address."""
return self._address
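
A hedged usage sketch for the module above, assuming the `daqhats` package is installed and exposes `hat_list` and `HatIDs` at package level (as the MCC examples do); on a machine without `libdaqhats.so.1`, `hat_list()` simply returns an empty list, so the sketch degrades gracefully.

```python
# Enumerate attached DAQ HATs and filter for a specific board type.
# On a machine without libdaqhats.so.1 this just prints an empty list.
from daqhats import hat_list, HatIDs

boards = hat_list(filter_by_id=HatIDs.ANY)
print(boards)

for board in hat_list(filter_by_id=HatIDs.MCC_118):
    print(board.address, hex(board.id), board.product_name)
```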

repo_name: PaddlePaddle/Paddle | path: /test/mkldnn/test_fusion_lstm_int8_mkldnn_op.py | filename: test_fusion_lstm_int8_mkldnn_op.py | extension: py | length_bytes: 5,251
blob_id: 126b8e68effd60222be11caf39b5ea95e86aca96 | directory_id: 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | content_id: 546a30c3046ced360e3ead1ca0a23a111932ac92
snapshot_id: b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | revision_id: 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | branch_name: refs/heads/develop
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: C++ | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-17T21:27:30.568889 | revision_date: 2023-08-17T12:38:22 | committer_date: 2023-08-17T12:38:22 | gha_event_created_at: 2023-09-14T19:20:51 | gha_created_at: 2016-08-15T06:59:08
github_id: 65,711,522 | star_events_count: 20,414 | fork_events_count: 5,891
content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eager_op_test import OpTest
from test_fusion_lstm_op import ACTIVATION, fusion_lstm
class TestFusionLSTMINT8MKLDNNOp(OpTest):
def set_confs(self):
pass
def setUp(self):
self.op_type = "fusion_lstm"
self.lod = [[2, 3, 5, 4]]
self.IC = 3
self.OC = 5
self.is_reverse = False
self.has_initial_state = False
self.act_cell = 'tanh'
self.act_gate = 'sigmoid'
self.act_cand = 'tanh'
self.use_peepholes = False # LSTM u8 doesn't support peepholes
self.use_mkldnn = True
self.mkldnn_data_type = "int8"
self.force_fp32_output = False
self.error_margin = 1e-5
self.set_confs()
# RNN dimensions
T = sum(self.lod[0])
N = len(self.lod[0])
# Input data
x_f32 = np.random.rand(T, self.IC).astype('float32') * 2 - 1
scale_data = 63.0
shift_data = 64.0
x_u8 = np.rint(x_f32 * scale_data + shift_data).astype(np.uint8)
# WeightX/WeightH data
wx = np.random.rand(self.IC, 4 * self.OC).astype('float32') * 2 - 1
wh = np.random.rand(self.OC, 4 * self.OC).astype('float32') * 2 - 1
# Calculating weight scales
# scales = 127 / max(abs(channel_wise(weightsX + weightsH)))
s8_max = 127.0
scale_weights = s8_max / np.max(
np.abs(np.concatenate([wx[:, :], wh[:, :]], axis=0)), axis=0
)
scale_weights = scale_weights.astype('float')
if self.use_peepholes:
b = np.random.rand(1, 7 * self.OC).astype('float32')
else:
b = np.random.rand(1, 4 * self.OC).astype('float32')
w_b = np.copy(b[:, 0 : 4 * self.OC])
w_c = b[:, 4 * self.OC :] if self.use_peepholes else None
bx = np.random.normal(size=(1, 4 * self.OC)).astype('float32')
b[0, 0 : 4 * self.OC] += bx[0, :]
if self.has_initial_state:
h0 = np.random.rand(N, self.OC).astype('float32')
c0 = np.random.rand(N, self.OC).astype('float32')
else:
h0 = np.zeros((N, self.OC)).astype('float32')
c0 = np.zeros((N, self.OC)).astype('float32')
hidden_f32, c = fusion_lstm(
x_f32,
self.lod,
wx,
bx,
h0,
c0,
wh,
w_b,
w_c,
self.is_reverse,
ACTIVATION[self.act_gate],
ACTIVATION[self.act_cell],
ACTIVATION[self.act_cand],
)
self.inputs = {
'X': (x_u8, self.lod),
'WeightX': wx,
'WeightH': wh,
'Bias': b,
}
if self.has_initial_state:
self.inputs['H0'] = h0
self.inputs['C0'] = c0
if self.force_fp32_output:
self.error_margin = 1e-1
self.outputs = {
'Hidden': (hidden_f32, self.lod),
'Cell': (c, self.lod),
}
else:
self.error_margin = 2
hidden_u8 = np.rint(hidden_f32 * scale_data + shift_data).astype(
np.uint8
)
self.outputs = {
'Hidden': (hidden_u8, self.lod),
'Cell': (c, self.lod),
}
self.attrs = {
'gate_activation': self.act_gate,
'cell_activation': self.act_cell,
'candidate_activation': self.act_cand,
'is_reverse': self.is_reverse,
'use_peepholes': self.use_peepholes,
'use_mkldnn': self.use_mkldnn,
'mkldnn_data_type': self.mkldnn_data_type,
'force_fp32_output': self.force_fp32_output,
'Scale_data': scale_data,
'Shift_data': shift_data,
'Scale_weights': scale_weights,
}
def test_check_output(self):
for use_seq in {True, False}:
self.attrs['use_seq'] = use_seq
self.check_output(
check_dygraph=False,
no_check_set=["Cell"],
atol=self.error_margin,
)
class TestFusionLSTMINT8MKLDNNOp2(TestFusionLSTMINT8MKLDNNOp):
def set_confs(self):
self.force_fp32_output = True
class TestFusionLSTMINT8MKLDNNOp4(TestFusionLSTMINT8MKLDNNOp):
def set_confs(self):
self.is_reverse = True
class TestFusionLSTMINT8MKLDNNOp5(TestFusionLSTMINT8MKLDNNOp):
def set_confs(self):
self.has_initial_state = True
if __name__ == "__main__":
from paddle import enable_static
enable_static()
unittest.main()
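
The quantization bookkeeping in `setUp` is the heart of this test; a NumPy-only sketch (independent of Paddle) of the same scaling: inputs are mapped to uint8 with `Scale_data`/`Shift_data`, and per-output-channel weight scales are `127 / max(abs(concat([wx, wh])))`.

```python
# NumPy-only illustration of the uint8 / int8 scale computation used in the test.
import numpy as np

IC, OC = 3, 5
x_f32 = np.random.rand(10, IC).astype('float32') * 2 - 1

scale_data, shift_data = 63.0, 64.0
x_u8 = np.rint(x_f32 * scale_data + shift_data).astype(np.uint8)  # maps [-1, 1] into [1, 127]

wx = np.random.rand(IC, 4 * OC).astype('float32') * 2 - 1
wh = np.random.rand(OC, 4 * OC).astype('float32') * 2 - 1
# One scale per output channel: 127 / max(|[wx; wh]|) over the concatenated rows.
scale_weights = 127.0 / np.max(np.abs(np.concatenate([wx, wh], axis=0)), axis=0)

print(x_u8.min(), x_u8.max())   # stays inside the uint8 range
print(scale_weights.shape)      # (20,) == (4 * OC,)
```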

repo_name: robocorp/robotframework-lsp | path: /robocorp-python-ls-core/src/robocorp_ls_core/__init__.py | filename: __init__.py | extension: py | length_bytes: 122
blob_id: 7358fd17d1ca6ad45a86e585b0794e4682484f3f | directory_id: 530b180c3aade8e67cc61ad2baddff018f7d59a8 | content_id: a0f0c2082d9cf84959e2adca86fc83ed5fa2cb3b
snapshot_id: 67a1f35b9268d349045eb8fe930ea381c2d94cae | revision_id: d72e5310ed4a8165d7ee516d79e0accccaf7748c | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0 | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-08-17T05:12:43.598270 | revision_date: 2023-08-12T12:11:22 | committer_date: 2023-08-12T12:13:21 | gha_event_created_at: 2023-09-13T22:39:09 | gha_created_at: 2020-01-20T21:31:20
github_id: 235,202,865 | star_events_count: 167 | fork_events_count: 72
content:
import os.path
__file__ = os.path.abspath(__file__)
if __file__.endswith((".pyc", ".pyo")):
__file__ = __file__[:-1]

repo_name: JDASoftwareGroup/kartothek | path: /kartothek/io_components/cube/query/_group.py | filename: _group.py | extension: py | length_bytes: 7,998
blob_id: 6738978d82bdbc182069e58badd7fbccfc8cb64c | directory_id: 1ad268817e4f048815df6e7b7669c45257a37b0e | content_id: 8884ff077d1d3fd45e648bcce973ba07eb677916
snapshot_id: 07c7f2fceb3dcee5cf8d0a6a93f4c1060eb0bcf4 | revision_id: 1821ea5df60d4079d3911b3c2f17be11d8780e22 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-05-26T11:43:04.781173 | revision_date: 2021-12-10T09:15:19 | committer_date: 2021-12-10T09:15:19 | gha_event_created_at: 2023-05-15T21:56:50 | gha_created_at: 2019-05-02T15:45:42
github_id: 184,608,549 | star_events_count: 178 | fork_events_count: 59
content:
"""
Query group code datastructure and load code.
"""
import typing
import attr
import pandas as pd
from kartothek.io_components.metapartition import SINGLE_TABLE, MetaPartition
from kartothek.utils.converters import converter_str
from kartothek.utils.pandas import (
concat_dataframes,
drop_sorted_duplicates_keep_last,
sort_dataframe,
)
__all__ = ("QueryGroup", "load_group", "quick_concat")
@attr.s(frozen=True)
class QueryGroup:
"""
Query group, aka logical partition w/ all kartothek metapartition and information required to load the data.
Parameters
----------
metapartition: Dict[int, Dict[str, Tuple[kartothek.io_components.metapartition.MetaPartition, ...]]]
Mapping from partition ID to metapartitions per dataset ID.
load_columns: Dict[str, Set[str]]
Columns to load.
output_columns: Tuple[str, ...]
Tuple of columns that will be returned from the query API.
predicates: Dict[str, Tuple[Tuple[Tuple[str, str, Any], ...], ...]]
Predicates for each dataset ID.
empty_df: Dict[str, pandas.DataFrame]
Empty DataFrame for each dataset ID.
dimension_columns: Tuple[str, ...]
Dimension columns, used for de-duplication and to join data.
restrictive_dataset_ids: Set[str]
Datasets (by Ktk_cube dataset ID) that are restrictive during the join process.
"""
metapartitions = attr.ib(
type=typing.Dict[int, typing.Dict[str, typing.Tuple[MetaPartition, ...]]]
)
load_columns = attr.ib(type=typing.Dict[str, typing.Tuple[str, ...]])
output_columns = attr.ib(type=typing.Tuple[str, ...])
predicates = attr.ib(
type=typing.Dict[
str,
typing.Tuple[typing.Tuple[typing.Tuple[str, str, typing.Any], ...], ...],
]
)
empty_df = attr.ib(type=typing.Dict[str, pd.DataFrame])
dimension_columns = attr.ib(type=typing.Tuple[str, ...])
restrictive_dataset_ids = attr.ib(type=typing.Set[str])
def _load_all_mps(mps, store, load_columns, predicates, empty):
"""
Load kartothek_cube-relevant data from all given MetaPartitions.
The result will be a concatenated Dataframe.
Parameters
----------
mps: Iterable[MetaPartition]
MetaPartitions to load.
store: simplekv.KeyValueStore
Store to load data from.
load_columns: List[str]
Columns to load.
predicates: Optional[List[List[Tuple[str, str, Any]]]]
Predicates to apply during load.
empty: pandas.DataFrame
Empty Dataframe dummy.
Returns
-------
df: pandas.DataFrame
Concatenated data.
"""
dfs_mp = []
for mp in mps:
mp = mp.load_dataframes(
store=store,
predicate_pushdown_to_io=True,
tables=[SINGLE_TABLE],
columns={SINGLE_TABLE: sorted(load_columns)},
predicates=predicates,
)
df = mp.data[SINGLE_TABLE]
df.columns = df.columns.map(converter_str)
dfs_mp.append(df)
return concat_dataframes(dfs_mp, empty)
def _load_partition_dfs(cube, group, partition_mps, store):
"""
Load partition Dataframes for seed, restrictive and other data.
The information about the merge strategy (seed, restricting, others) is taken from ``group``.
Parameters
----------
cube: Cube
Cube spec.
group: QueryGroup
Query group.
partition_mps: Dict[str, Iterable[MetaPartition]]
MetaPartitions for every dataset in this partition.
store: simplekv.KeyValueStore
Store to load data from.
Returns
-------
df_seed: pandas.DataFrame
Seed data.
dfs_restrict: List[pandas.DataFrame]
Restrictive data (for inner join).
dfs_other: List[pandas.DataFrame]
Other data (for left join).
"""
df_seed = None
dfs_restrict = []
dfs_other = []
for ktk_cube_dataset_id, empty in group.empty_df.items():
mps = partition_mps.get(ktk_cube_dataset_id, [])
df = _load_all_mps(
mps=mps,
store=store,
load_columns=list(group.load_columns[ktk_cube_dataset_id]),
predicates=group.predicates.get(ktk_cube_dataset_id, None),
empty=empty,
)
# de-duplicate and sort data
# PERF: keep order of dimensionality identical to group.dimension_columns
df_cols = set(df.columns)
dimensionality = [c for c in group.dimension_columns if c in df_cols]
df = sort_dataframe(df=df, columns=dimensionality)
df = drop_sorted_duplicates_keep_last(df, dimensionality)
if ktk_cube_dataset_id == cube.seed_dataset:
assert df_seed is None
df_seed = df
elif ktk_cube_dataset_id in group.restrictive_dataset_ids:
dfs_restrict.append(df)
else:
dfs_other.append(df)
assert df_seed is not None
return df_seed, dfs_restrict, dfs_other
def _load_partition(cube, group, partition_mps, store):
"""
Load partition and merge partition data within given QueryGroup.
The information about the merge strategy (seed, restricting, others) is taken from ``group``.
Parameters
----------
cube: Cube
Cube spec.
group: QueryGroup
Query group.
partition_mps: Dict[str, Iterable[MetaPartition]]
MetaPartitions for every dataset in this partition.
store: simplekv.KeyValueStore
Store to load data from.
Returns
-------
df: pandas.DataFrame
Merged data.
"""
# MEMORY: keep the DF references only as long as they are required:
# - use only 1 "intermediate result variable" called df_partition
# - consume the DFs lists (dfs_restrict, dfs_other) while iterating over them
df_partition, dfs_restrict, dfs_other = _load_partition_dfs(
cube=cube, group=group, partition_mps=partition_mps, store=store
)
while dfs_restrict:
df_partition = df_partition.merge(dfs_restrict.pop(0), how="inner")
while dfs_other:
df_partition = df_partition.merge(dfs_other.pop(0), how="left")
return df_partition.loc[:, list(group.output_columns)]
def load_group(group, store, cube):
"""
Load :py:class:`QueryGroup` and return DataFrame.
Parameters
----------
group: QueryGroup
Query group.
store: Union[Callable[[], simplekv.KeyValueStore], simplekv.KeyValueStore]
Store to load data from.
cube: kartothek.core.cube.cube.Cube
Cube specification.
Returns
-------
df: pandas.DataFrame
Dataframe, may be empty.
"""
if callable(store):
store = store()
partition_results = []
for partition_id in sorted(group.metapartitions.keys()):
partition_results.append(
_load_partition(
cube=cube,
group=group,
partition_mps=group.metapartitions[partition_id],
store=store,
)
)
# concat all partitions
return quick_concat(
dfs=partition_results,
dimension_columns=group.dimension_columns,
partition_columns=cube.partition_columns,
)
def quick_concat(dfs, dimension_columns, partition_columns):
"""
Fast version of::
pd.concat(
dfs,
ignore_index=True,
sort=False,
).sort_values(dimension_columns + partition_columns).reset_index(drop=True)
if inputs are presorted.
Parameters
-----------
dfs: Iterable[pandas.DataFrame]
DataFrames to concat.
dimension_columns: Iterable[str]
Dimension columns in correct order.
partition_columns: Iterable[str]
Partition columns in correct order.
Returns
-------
df: pandas.DataFrame
Concatenated result.
"""
return sort_dataframe(
df=concat_dataframes(dfs),
columns=list(dimension_columns) + list(partition_columns),
)
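
The merge order in `_load_partition` (inner joins against restrictive datasets, then left joins against the rest) is easiest to see on toy data; a pandas-only sketch, independent of kartothek's stores and metapartitions.

```python
# Pandas-only illustration of the join strategy in _load_partition:
# seed data is restricted by inner joins, then enriched by left joins.
import pandas as pd

df_seed = pd.DataFrame({"x": [1, 2, 3], "p": ["a", "a", "b"]})
dfs_restrict = [pd.DataFrame({"x": [1, 2], "flag": [True, False]})]
dfs_other = [pd.DataFrame({"x": [2, 3], "extra": [10, 30]})]

df = df_seed
while dfs_restrict:
    df = df.merge(dfs_restrict.pop(0), how="inner")   # drops the seed row with x == 3
while dfs_other:
    df = df.merge(dfs_other.pop(0), how="left")       # keeps remaining rows, NaN where missing

print(df)
```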

repo_name: cahirwpz/demoscene | path: /prototypes/c2p/c2p_1x1_4bpl_blitter.py | filename: c2p_1x1_4bpl_blitter.py | extension: py | length_bytes: 2,604
blob_id: 5daf74551dd020acb0add1638037b906b3f7180c | directory_id: a4d8fcfa8084c5d36a862aeb0978327ff4cfe50f | content_id: f87ac1548a5eccdd56c64e45585a8f4ced98b0e4
snapshot_id: a0b548527d89a354b5b8dfd922f39d8b14d61643 | revision_id: cd4517ba69e26c96a69e505e305a6d0152972982 | branch_name: refs/heads/master
detected_licenses: ["Artistic-2.0"] | license_type: permissive | gha_license_id: Artistic-2.0 | gha_language: C | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2023-03-17T13:06:43.731158 | revision_date: 2023-03-13T19:48:47 | committer_date: 2023-03-13T19:48:47 | gha_event_created_at: 2022-10-18T09:43:25 | gha_created_at: 2012-01-22T23:03:06
github_id: 3,242,770 | star_events_count: 105 | fork_events_count: 21
content:
#!/usr/bin/env python3
from common import Bit, Word, Channel, Blit, Array
def c2p(bitplane_output=True):
m0 = Word.Mask('00ff')
m1 = Word.Mask('0f0f')
m2 = Word.Mask('3333')
m3 = Word.Mask('5555')
print("=[ c2p 1x1 4bpl (blitter) ]=".center(48, '-'))
def MakeWord(chars, color):
bits = []
for c in chars:
for i in range(4):
bits.append(Bit.Var(c, i, color))
return Word(bits)
A = Array.Make(MakeWord)
N = len(A)
Array.Print("Data:", *A)
B = Array.Zero(N, 16)
Blit(lambda a, b: ((a >> 8) & m0) | (b & ~m0),
N // 4, 2, Channel(A, 2, 2), Channel(A, 0, 2), Channel(B, 0, 2))
Blit(lambda a, b: ((a << 8) & ~m0) | (b & m0),
N // 4, 2, Channel(A, 0, 2), Channel(A, 2, 2), Channel(B, 2, 2))
Array.Print("Swap 8x4:", *B)
C = Array.Zero(N, 16)
Blit(lambda a, b: ((a >> 4) & m1) | (b & ~m1),
N // 2, 1, Channel(B, 1, 1), Channel(B, 0, 1), Channel(C, 0, 1))
Blit(lambda a, b: ((a << 4) & ~m1) | (b & m1),
N // 2, 1, Channel(B, 0, 1), Channel(B, 1, 1), Channel(C, 1, 1))
Array.Print("Swap 4x2:", *C)
D = Array.Zero(N, 16)
Blit(lambda a, b: ((a >> 2) & m2) | (b & ~m2),
N // 4, 2, Channel(C, 2, 2), Channel(C, 0, 2), Channel(D, 0, 2))
Blit(lambda a, b: ((a << 2) & ~m2) | (b & m2),
N // 4, 2, Channel(C, 0, 2), Channel(C, 2, 2), Channel(D, 2, 2))
Array.Print("Swap 2x2:", *D)
if bitplane_output:
E = [Array.Zero(N // 4, 16) for i in range(4)]
Blit(lambda a, b: ((a >> 1) & m3) | (b & ~m3), N // 4, 1,
Channel(D, 1, 3), Channel(D, 0, 3), Channel(E[0], 0, 0))
Blit(lambda a, b: ((a >> 1) & m3) | (b & ~m3), N // 4, 1,
Channel(D, 3, 3), Channel(D, 2, 3), Channel(E[2], 0, 0))
Blit(lambda a, b: ((a << 1) & ~m3) | (b & m3), N // 4, 1,
Channel(D, 0, 3), Channel(D, 1, 3), Channel(E[1], 0, 0))
Blit(lambda a, b: ((a << 1) & ~m3) | (b & m3), N // 4, 1,
Channel(D, 2, 3), Channel(D, 3, 3), Channel(E[3], 0, 0))
print("Bitplanes:")
Array.Print("[0]:", *E[0])
Array.Print("[1]:", *E[1])
Array.Print("[2]:", *E[2])
Array.Print("[3]:", *E[3])
else:
E = Array.Zero(N, 16)
Blit(lambda a, b: ((a >> 1) & m3) | (b & ~m3),
N // 2, 1, Channel(D, 1, 1), Channel(D, 0, 1), Channel(E, 0, 1))
Blit(lambda a, b: ((a << 1) & ~m3) | (b & m3),
N // 2, 1, Channel(D, 0, 1), Channel(D, 1, 1), Channel(E, 1, 1))
Array.Print("Swap 1x1:", *E)
c2p()
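
Each `Blit` above applies a masked merge of two shifted sources; a tiny standard-library sketch of the first 8x4 step on two concrete 16-bit words, showing how `((a >> 8) & m0) | (b & ~m0)` gathers the two high bytes into one word while its counterpart gathers the two low bytes.

```python
# The masked-merge primitive behind each Blit call, shown on two 16-bit words.
# With m0 = 0x00ff, the first expression packs the high bytes of a and b into
# one word, the second packs the low bytes (results kept to 16 bits).
m0 = 0x00FF
a, b = 0xAABB, 0xCCDD

high_bytes = (((a >> 8) & m0) | (b & ~m0)) & 0xFFFF   # -> 0xCCAA
low_bytes = (((a << 8) & ~m0) | (b & m0)) & 0xFFFF    # -> 0xBBDD

print(hex(high_bytes), hex(low_bytes))
```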

repo_name: rosedu/wouso | path: /wouso/core/scoring/tests.py | filename: tests.py | extension: py | length_bytes: 8,440
blob_id: 7d12e7d80c8ed791e46ff25eecae84f45ca96ce4 | directory_id: 364774e29ef2474552ea3839de0951e63cbae0a6 | content_id: b9f08a25a6acc568d2b0a577c8f8e1b0628e6943
snapshot_id: 66c50ef750cf79d6959768f7df93cc08607cc266 | revision_id: ed34c62ac925db719388f27fe5acb40376d8d0c1 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: NOASSERTION | gha_language: Python | language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
visit_date: 2022-10-29T14:28:51.818073 | revision_date: 2022-09-24T18:54:04 | committer_date: 2022-09-24T18:54:04 | gha_event_created_at: 2019-11-15T09:33:50 | gha_created_at: 2011-12-12T16:15:01
github_id: 2,965,476 | star_events_count: 121 | fork_events_count: 97
content:
from django.test import TestCase
from django.db.models.query import QuerySet
from django.contrib.auth.models import User
from wouso.core.config.models import IntegerListSetting
from wouso.core.game.models import Game
from wouso.core import scoring, signals
from wouso.core.tests import WousoTest
from wouso.core.user.models import Player
from models import Formula, Coin, History
from sm import FormulaParsingError, setup_scoring, CORE_POINTS, check_setup, update_points, calculate
class ScoringTestCase(TestCase):
def setUp(self):
self.user, new = User.objects.get_or_create(username='33')
self.game = Game.get_instance()
self.coin = Coin.add('_test')
def tearDown(self):
#self.user.delete()
self.game.delete()
self.coin.delete()
def testHistoryFor(self):
no_history = scoring.history_for(self.user, self.game, external_id=999)
self.assertEqual(len(no_history), 0)
def testScoreSimple(self):
scoring.score_simple(self.user.get_profile(), self.coin, game=self.game, external_id=2, amount=10)
multiple = scoring.history_for(self.user, self.game, external_id=2)
self.assertTrue(isinstance(multiple, QuerySet))
self.assertEqual(len(multiple), 1)
history = list(multiple)[0]
self.assertTrue(isinstance(history, History))
self.assertEqual(history.amount, 10)
def testCalculate(self):
formula = Formula.add('_test_formula',
expression='_test=5', owner=self.game)
# Call by name
ret = scoring.calculate('_test_formula')
self.assertTrue(isinstance(ret, dict))
# Call by object
ret = scoring.calculate(formula)
self.assertTrue(isinstance(ret, dict))
self.assertEqual(ret['_test'], 5)
formula2 = Formula.add('_test_formula2',
expression='_test=5*3', owner=self.game)
ret = scoring.calculate(formula2)
self.assertTrue(isinstance(ret, dict))
self.assertEqual(ret['_test'], 15)
# Multiple coins
formula2.expression = '_test=5*3; points=4'
ret = scoring.calculate(formula2)
self.assertTrue(isinstance(ret, dict))
self.assertEqual(ret['_test'], 15)
self.assertEqual(ret['points'], 4)
# Fail safe
formula2.expression = '_test=5*cucu'
try:
ret = scoring.calculate(formula2)
# no error? wtf
self.assertFalse(True)
except Exception as e:
self.assertTrue(isinstance(e, FormulaParsingError))
def testScore(self):
formula = Formula.add('_test_formula_sc',
expression='_test=13', owner=self.game)
scoring.score(self.user.get_profile(), self.game, formula,
external_id=3)
hs = scoring.history_for(self.user, self.game, external_id=3)
self.assertTrue(isinstance(hs, QuerySet))
history = list(hs)[0]
# check if specific coin has been updated
self.assertEqual(history.coin, self.coin)
self.assertEqual(history.amount, 13)
class UpdateScoringTest(WousoTest):
def test_update_points_level_upgrade_first_time(self):
level_up_points = 80
IntegerListSetting.get('level_limits').set_value(str(level_up_points))
Coin.add('points')
Coin.add('gold')
Formula.add('level-gold', expression='gold=10*{level}', owner=None)
# Upgrade player's level
player = self._get_player()
player.points = level_up_points + 1
player.level_no = 1
player.save()
update_points(player, None)
coins = History.user_coins(player.user)
self.assertEqual(coins['gold'], 10 * player.max_level)
def test_update_points_level_downgrade(self):
level_up_points = 80
IntegerListSetting.get('level_limits').set_value(str(level_up_points))
Coin.add('points')
Coin.add('gold')
Formula.add('level-gold', expression='gold=10*{level}', owner=None)
# Upgrade player's level
player = self._get_player()
player.points = level_up_points + 1
player.level_no = 1
player.save()
update_points(player, None)
# Downgrade player's level
player.points = level_up_points - 1
player.save()
update_points(player, None)
coins = History.user_coins(player.user)
self.assertEqual(coins['gold'], 10 * player.max_level)
def test_update_points_level_upgrade_back(self):
level_up_points = 80
IntegerListSetting.get('level_limits').set_value(str(level_up_points))
Coin.add('points')
Coin.add('gold')
Formula.add('level-gold', expression='gold=10*{level}', owner=None)
# Upgrade player's level
player = self._get_player()
player.points = level_up_points + 1
player.level_no = 1
player.save()
update_points(player, None)
# Downgrade player's level
player.points = level_up_points - 1
player.save()
update_points(player, None)
#Upgrade player's level back
player.points = level_up_points + 1
player.save()
update_points(player, None)
coins = History.user_coins(player.user)
self.assertEqual(coins['gold'], 10 * player.max_level)
class ScoringHistoryTest(WousoTest):
def test_user_coins(self):
Coin.add('points')
Coin.add('gold')
player = self._get_player()
scoring.score_simple(player, 'points', 10)
self.assertIn('points', History.user_coins(player.user))
def test_user_points(self):
coin = Coin.add('points')
player = self._get_player()
scoring.score_simple(player, 'points', 10)
up = History.user_points(user=player.user)
self.assertTrue('wouso' in up)
self.assertTrue(coin.name in up['wouso'])
self.assertEqual(up['wouso'][coin.name], 10)
def test_accessors(self):
player = self._get_player()
self.assertEqual(scoring.user_coins(player), scoring.user_coins(player.user))
def test_sync_methods(self):
player = self._get_player()
coin = Coin.add('points')
History.objects.create(user=player.user, coin=coin, amount=10)
self.assertEqual(player.points, 0)
scoring.sync_user(player)
self.assertEqual(player.points, 10)
History.objects.create(user=player.user, coin=coin, amount=10)
self.assertEqual(player.points, 10)
scoring.sync_all_user_points()
player = Player.objects.get(pk=player.pk)
self.assertEqual(player.points, 20)
class ScoringSetupTest(TestCase):
def test_check_setup(self):
setup_scoring()
self.assertTrue(check_setup())
def test_setup(self):
setup_scoring()
for c in CORE_POINTS:
self.assertTrue(Coin.get(c))
class ScoringFirstLogin(WousoTest):
def test_first_login_points(self):
f = Formula.add('start-points', expression='points=10')
Coin.add('points')
player = self._get_player()
self.assertEqual(player.points, 0)
# this won't work, since the activity is sent in our custom view
#self.client.login(username=player.user.username, password='test')
# using this instead
signals.addActivity.send(sender=None, user_from=player, action="login", game=None, public=False)
player = Player.objects.get(pk=player.pk)
self.assertEqual(player.points, 10)
class ScoringTestFunctions(TestCase):
def test_fibbonaci_formula(self):
formula = Formula.add('test-fib', expression='points=fib(0)')
value = calculate(formula)['points']
self.assertEqual(value, 0)
formula.expression = 'points=fib(1)'
formula.save()
value = calculate(formula)['points']
self.assertEqual(value, 1)
formula.expression = 'points=fib(2)'
formula.save()
value = calculate(formula)['points']
self.assertEqual(value, 1)
formula.expression = 'points=fib(3)'
formula.save()
value = calculate(formula)['points']
self.assertEqual(value, 2)
formula.expression = 'points=fib(4)'
formula.save()
value = calculate(formula)['points']
self.assertEqual(value, 3)
|
d11ed7408c2a7ee52c656653f559319f87ef6281
|
fd6f11eeba5e7191a115b2f39c930145d7ab83d8
|
/FingersExtras/Python/cycle through CC lanes.py
|
336e5e383f9c376d3d4fe5155f25112dee3bf2c9
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
reaper-oss/sws
|
30792fd38d7e969b0e4fe20ca9b256989452a959
|
a3693ad52ed16fe95ac0a570971656b6c9337f26
|
refs/heads/master
| 2023-09-02T01:43:13.746682
| 2022-10-11T22:21:17
| 2023-01-16T13:30:10
| 21,614,326
| 360
| 86
|
MIT
| 2023-08-23T11:43:23
| 2014-07-08T13:48:01
|
C++
|
UTF-8
|
Python
| false
| false
| 82
|
py
|
cycle through CC lanes.py
|
cmdId = RPR_NamedCommandLookup("_FNG_CYCLE_CC_LANE")
RPR_Main_OnCommand(cmdId, 0)
|
f5f392f829b283355bdbc4720b6bef269b3bb826
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/plex/view.py
|
ba883883ddc850064cdfac66f4e0607ef00e7577
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
view.py
|
"""Implement a view to provide proxied Plex thumbnails to the media browser."""
from __future__ import annotations
from http import HTTPStatus
import logging
from aiohttp import web
from aiohttp.hdrs import CACHE_CONTROL
from aiohttp.typedefs import LooseHeaders
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
from homeassistant.components.media_player import async_fetch_image
from .const import SERVERS
from .helpers import get_plex_data
_LOGGER = logging.getLogger(__name__)
class PlexImageView(HomeAssistantView):
"""Media player view to serve a Plex image."""
name = "api:plex:image"
url = "/api/plex_image_proxy/{server_id}/{media_content_id}"
async def get(
self,
request: web.Request,
server_id: str,
media_content_id: str,
) -> web.Response:
"""Start a get request."""
if not request[KEY_AUTHENTICATED]:
return web.Response(status=HTTPStatus.UNAUTHORIZED)
hass = request.app["hass"]
if (server := get_plex_data(hass)[SERVERS].get(server_id)) is None:
return web.Response(status=HTTPStatus.NOT_FOUND)
if (image_url := server.thumbnail_cache.get(media_content_id)) is None:
return web.Response(status=HTTPStatus.NOT_FOUND)
data, content_type = await async_fetch_image(_LOGGER, hass, image_url)
if data is None:
return web.Response(status=HTTPStatus.SERVICE_UNAVAILABLE)
headers: LooseHeaders = {CACHE_CONTROL: "max-age=3600"}
return web.Response(body=data, content_type=content_type, headers=headers)
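# Usage sketch (hedged; the setup hook shown is an assumption, not part of this
# module): a HomeAssistantView subclass is typically registered once during
# integration setup, after which the route declared above becomes reachable:
#
#     hass.http.register_view(PlexImageView())
#     # GET /api/plex_image_proxy/<server_id>/<media_content_id> then returns
#     # the proxied thumbnail bytes with a one-hour Cache-Control header.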
|
b34fea38f0d0ff53416073fa9160a8264cc34462
|
d91d19da3589c3f69a834bbb9834386e80f100e0
|
/datashader/data_libraries/cudf.py
|
343a0639e0fe4d564290f8411b27a9f41b9f91cc
|
[] |
permissive
|
holoviz/datashader
|
11d518371e974c02ba3843871e3e0905e0c83956
|
b510594eb771d14cff3b69efca8ddd37ca3a1046
|
refs/heads/main
| 2023-08-18T13:55:24.214980
| 2023-08-17T08:45:48
| 2023-08-17T08:45:48
| 48,504,165
| 1,040
| 133
|
BSD-3-Clause
| 2023-09-11T09:51:30
| 2015-12-23T18:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 343
|
py
|
cudf.py
|
from __future__ import annotations
from datashader.data_libraries.pandas import default
from datashader.core import bypixel
import cudf
@bypixel.pipeline.register(cudf.DataFrame)
def cudf_pipeline(df, schema, canvas, glyph, summary, *, antialias=False):
return default(glyph, df, schema, canvas, summary, antialias=antialias, cuda=True)
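# Usage sketch (hedged; `gpu_df`, 'x' and 'y' are hypothetical names): once this
# pipeline is registered, a cudf.DataFrame can be aggregated like a pandas one,
# and bypixel dispatches to cudf_pipeline, which runs default() with cuda=True:
#
#     import datashader as ds
#     canvas = ds.Canvas(plot_width=400, plot_height=400)
#     agg = canvas.points(gpu_df, 'x', 'y')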
|
a2ae09cefa0426601090d026bbbde91a0411655e
|
f85fd70c36571999d503343cf9e94c479339e980
|
/ansible/modules/hashivault/hashivault_identity_entity_alias.py
|
6e6f1ef7b1050fe7316fdde9a624f4f513958be4
|
[
"MIT"
] |
permissive
|
TerryHowe/ansible-modules-hashivault
|
2b1f219e69f7beb8c947ddc4907b7b1836c9e765
|
8ac45bb025a33de6027dca88e8870a67e154cae4
|
refs/heads/main
| 2023-09-01T01:31:18.124112
| 2023-08-14T14:04:53
| 2023-08-14T14:04:53
| 57,915,238
| 453
| 198
|
MIT
| 2023-08-14T14:04:55
| 2016-05-02T19:22:57
|
Python
|
UTF-8
|
Python
| false
| false
| 7,527
|
py
|
hashivault_identity_entity_alias.py
|
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_identity_entity_alias
version_added: "3.13.0"
short_description: Hashicorp Vault entity alias manage module
description:
- Module to manage identity entity aliases in Hashicorp Vault.
options:
name:
description:
- Name of the alias. Name should be the identifier of the client in the authentication source.
alias_id:
description:
- ID of the entity alias. If set, updates the corresponding entity alias.
entity_name:
description:
            - Entity name to which this alias belongs.
canonical_id:
description:
            - Entity ID to which this alias belongs.
mount_accessor:
description:
            - Accessor of the mount to which the alias should belong.
state:
description:
            - whether to create/update or delete the entity
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_identity_entity_alias:
name: 'bob'
entity_name: 'bob'
'''
def main():
argspec = hashivault_argspec()
argspec['name'] = dict(required=True, type='str', default=None)
argspec['alias_id'] = dict(required=False, type='str', default=None)
argspec['entity_name'] = dict(required=False, type='str', default=None)
argspec['canonical_id'] = dict(required=False, type='str', default=None)
argspec['mount_accessor'] = dict(required=False, type='str', default=None)
argspec['state'] = dict(required=False, choices=['present', 'absent'], default='present')
module = hashivault_init(argspec)
result = hashivault_identity_entity_alias(module.params)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
def hashivault_identity_entity_alias_update(client, alias_id, alias_name, canonical_id, mount_accessor):
try:
alias_details = client.secrets.identity.read_entity_alias(alias_id=alias_id)
except Exception as e:
return {'failed': True, 'msg': str(e)}
if alias_details['data']['canonical_id'] == canonical_id:
return {'changed': False}
try:
client.secrets.identity.update_entity_alias(
alias_id=alias_id,
name=alias_name,
canonical_id=canonical_id,
mount_accessor=mount_accessor
)
except Exception as e:
return {'failed': True, 'msg': str(e)}
return {'changed': True}
def hashivault_identity_entity_alias_create(client, alias_name, canonical_id, mount_accessor):
try:
list_of_aliases = client.secrets.identity.list_entity_aliases()
except Exception:
try:
alias_details = client.secrets.identity.create_or_update_entity_alias(
name=alias_name,
canonical_id=canonical_id,
mount_accessor=mount_accessor
)
except Exception as e:
return {'failed': True, 'msg': str(e)}
return {'changed': True, 'data': alias_details['data']}
for key, value in dict(list_of_aliases['data']['key_info']).items():
if value['mount_accessor'] == mount_accessor and value['name'] == alias_name:
return hashivault_identity_entity_alias_update(client, alias_id=key, alias_name=alias_name,
canonical_id=canonical_id,
mount_accessor=mount_accessor)
else:
try:
client.secrets.identity.create_or_update_entity_alias(name=alias_name, canonical_id=canonical_id,
mount_accessor=mount_accessor)
except Exception as e:
return {'failed': True, 'msg': str(e)}
return {'changed': True}
def hashivault_identity_entity_alias_delete(client, alias_id, alias_name, mount_accessor, canonical_id):
try:
list_of_aliases = client.secrets.identity.list_entity_aliases()
except Exception:
return {'changed': False}
else:
if alias_id is not None:
if alias_id not in list_of_aliases['data']['keys']:
return {'changed': False}
client.secrets.identity.delete_entity_alias(alias_id=alias_id)
return {'changed': True}
elif alias_name is not None:
for key, value in dict(list_of_aliases['data']['key_info']).items():
if value['mount_accessor'] == mount_accessor and \
value['name'] == alias_name and \
value['canonical_id'] == canonical_id:
client.secrets.identity.delete_entity_alias(alias_id=key)
return {'changed': True}
return {'changed': False}
return {'failed': True, 'msg': 'Either alias_id or name must be provided'}
@hashiwrapper
def hashivault_identity_entity_alias(params):
client = hashivault_auth_client(params)
alias_name = params.get('name')
alias_id = params.get('alias_id')
state = params.get('state')
mount_accessor = params.get('mount_accessor')
authtype = params.get('authtype')
entity_name = params.get('entity_name')
canonical_id = params.get('canonical_id')
# Get mount_accessor if not provided
if mount_accessor is None:
auth_method_details = client.read(path="/sys/auth/")
try:
mount_accessor = auth_method_details['data'][authtype + "/"]['accessor']
except Exception:
return {'failed': True, 'msg': 'Auth method %s not found. Use mount_accessor?' % authtype}
# Get canonical_id if not provided
if canonical_id is None:
if entity_name is None:
return {'failed': True, 'msg': 'Either canonical_id or entity_name must be provided'}
else:
try:
entity_details = client.secrets.identity.read_entity_by_name(
name=entity_name
)
except Exception:
return {'failed': True, 'msg': 'No entity with name %s' % entity_name}
canonical_id = entity_details['data']['id']
if state == 'present':
if alias_id is not None:
return hashivault_identity_entity_alias_update(client, alias_id=alias_id, alias_name=alias_name,
mount_accessor=mount_accessor, canonical_id=canonical_id)
elif alias_name is not None:
return hashivault_identity_entity_alias_create(client, alias_name=alias_name, mount_accessor=mount_accessor,
canonical_id=canonical_id)
else:
return {'failed': True, 'msg': 'Either alias_id or name must be provided'}
elif state == 'absent':
return hashivault_identity_entity_alias_delete(client, alias_id=alias_id, alias_name=alias_name,
mount_accessor=mount_accessor, canonical_id=canonical_id)
return {'failed': True, 'msg': 'Unknown state'}
if __name__ == '__main__':
main()
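# Companion playbook sketch (hedged; mirrors the EXAMPLES block above and the
# documented `state` option) showing how the same alias would be removed:
#
#   - hosts: localhost
#     tasks:
#       - hashivault_identity_entity_alias:
#           name: 'bob'
#           entity_name: 'bob'
#           state: 'absent'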
|
52f29a16942e8b6e31be5f85b096e2bb78d9f43b
|
bbd69601912a3361d788efd03a47f9d4e3bac09e
|
/demo/agw/PyGauge.py
|
fa05939284c18ea67a2d395b8e05bf9032fcffa4
|
[] |
no_license
|
wxWidgets/Phoenix
|
56929484460a0399a8f1d9582bc77c20aa14748d
|
a1184286703cf24c4b88e5bc14cf2979c1b1ea00
|
refs/heads/master
| 2023-09-01T07:10:17.437093
| 2023-08-31T05:38:01
| 2023-08-31T05:38:01
| 5,078,061
| 2,268
| 677
| null | 2023-09-09T17:06:59
| 2012-07-17T06:22:25
|
Python
|
UTF-8
|
Python
| false
| false
| 5,761
|
py
|
PyGauge.py
|
#!/usr/bin/env python
import wx
import os
import sys
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.split(dirName)[0])
try:
from agw import pygauge as PG
except ImportError: # if it's not there locally, try the wxPython lib.
try:
import wx.lib.agw.pygauge as PG
except:
raise Exception("This demo requires wxPython version greater than 2.9.0.0")
class PyGaugeDemo(wx.Panel):
def __init__(self, parent, log):
wx.Panel.__init__(self, parent)
self.log = log
self.mainPanel = wx.Panel(self)
self.mainPanel.SetBackgroundColour(wx.WHITE)
self.gauge1 = PG.PyGauge(self.mainPanel, -1, size=(100,25),style=wx.GA_HORIZONTAL)
self.gauge1.SetValue(80)
self.gauge1.SetBackgroundColour(wx.WHITE)
self.gauge1.SetBorderColor(wx.BLACK)
self.gauge2 = PG.PyGauge(self.mainPanel, -1, size=(100,25),style=wx.GA_HORIZONTAL)
self.gauge2.SetValue([20,80])
self.gauge2.SetBarColor([wx.Colour(162,255,178),wx.Colour(159,176,255)])
self.gauge2.SetBackgroundColour(wx.WHITE)
self.gauge2.SetBorderColor(wx.BLACK)
self.gauge2.SetBorderPadding(2)
self.gauge2.Update([30,0],2000)
self.gauge3 = PG.PyGauge(self.mainPanel, -1, size=(100,25),style=wx.GA_HORIZONTAL)
self.gauge3.SetValue(50)
self.gauge3.SetBarColor(wx.GREEN)
self.gauge3.SetBackgroundColour(wx.WHITE)
self.gauge3.SetBorderColor(wx.BLACK)
self.backColour = wx.ColourPickerCtrl(self.mainPanel, colour=self.gauge3.GetBackgroundColour())
self.borderColour = wx.ColourPickerCtrl(self.mainPanel, colour=self.gauge3.GetBorderColour())
self.barColour = wx.ColourPickerCtrl(self.mainPanel, colour=self.gauge3.GetBarColour())
self.gaugeValue = wx.TextCtrl(self.mainPanel, -1, str(self.gauge3.GetValue()), style=wx.TE_PROCESS_ENTER)
self.gaugeRange = wx.TextCtrl(self.mainPanel, -1, str(self.gauge3.GetRange()), style=wx.TE_PROCESS_ENTER)
self.gaugePadding = wx.TextCtrl(self.mainPanel, -1, str(self.gauge3.GetBorderPadding()), style=wx.TE_PROCESS_ENTER)
self.DoLayout()
self.BindEvents()
def DoLayout(self):
frameSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer = wx.BoxSizer(wx.VERTICAL)
colourSizer = wx.FlexGridSizer(2, 6, 1, 10)
label1 = wx.StaticText(self.mainPanel, -1, "Welcome to the PyGauge demo for wxPython!")
mainSizer.Add(label1, 0, wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, 10)
mainSizer.Add(self.gauge1, 0, wx.ALL, 20)
mainSizer.Add(self.gauge2, 0, wx.ALL, 20)
mainSizer.Add(self.gauge3, 0, wx.ALL, 20)
labelBack = wx.StaticText(self.mainPanel, -1, "Background Colour")
labelHover = wx.StaticText(self.mainPanel, -1, "Border Colour")
labelText = wx.StaticText(self.mainPanel, -1, "Bar Colour")
labelValue = wx.StaticText(self.mainPanel, -1, "Gauge Value ")
labelRange = wx.StaticText(self.mainPanel, -1, "Gauge Range")
labelPadding = wx.StaticText(self.mainPanel, -1, "Border Padding")
colourSizer.Add(labelBack)
colourSizer.Add(labelHover)
colourSizer.Add(labelText)
colourSizer.Add(labelValue)
colourSizer.Add(labelRange)
colourSizer.Add(labelPadding)
colourSizer.Add(self.backColour, 0, wx.EXPAND)
colourSizer.Add(self.borderColour, 0, wx.EXPAND)
colourSizer.Add(self.barColour, 0, wx.EXPAND)
colourSizer.Add(self.gaugeValue, 0, wx.EXPAND)
colourSizer.Add(self.gaugeRange, 0, wx.EXPAND)
colourSizer.Add(self.gaugePadding, 0, wx.EXPAND)
mainSizer.Add(colourSizer, 0, wx.EXPAND|wx.LEFT|wx.RIGHT, 10)
boldFont = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
boldFont.SetWeight(wx.FONTWEIGHT_BOLD)
for child in self.mainPanel.GetChildren():
if isinstance(child, wx.StaticText):
child.SetFont(boldFont)
self.mainPanel.SetSizer(mainSizer)
mainSizer.Layout()
frameSizer.Add(self.mainPanel, 1, wx.EXPAND)
self.SetSizer(frameSizer)
frameSizer.Layout()
def BindEvents(self):
self.Bind(wx.EVT_COLOURPICKER_CHANGED, self.OnPickColour)
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter, self.gaugeValue)
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter, self.gaugeRange)
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter, self.gaugePadding)
def OnEnter(self,event):
obj = event.GetEventObject()
if obj == self.gaugeValue:
self.gauge3.SetValue(int(self.gaugeValue.GetValue()))
if obj == self.gaugeRange:
self.gauge3.SetRange(int(self.gaugeRange.GetValue()))
if obj == self.gaugePadding:
self.gauge3.SetBorderPadding(int(self.gaugePadding.GetValue()))
self.gauge3.Refresh()
def OnPickColour(self, event):
obj = event.GetEventObject()
colour = event.GetColour()
if obj == self.backColour:
self.gauge3.SetBackgroundColour(colour)
elif obj == self.borderColour:
self.gauge3.SetBorderColour(colour)
else:
self.gauge3.SetBarColour(colour)
self.gauge3.Refresh()
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = PyGaugeDemo(nb, log)
return win
#----------------------------------------------------------------------
overview = PG.__doc__
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
37cd38f593e436022fe5eb9e6997d2f794f8c355
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/DeprecatedContent/Scripts/HTMLDocsAutomation/HTMLDocsAutomation_test.py
|
cb8eba43672bdb9fe20fde0c7aa8fa65d92cc3ab
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 7,637
|
py
|
HTMLDocsAutomation_test.py
|
from CommonServerPython import *
import os
import demistomock as demisto
RETURN_ERROR_TARGET = 'HTMLDocsAutomation.return_error'
def test_get_yaml_obj(mocker):
from HTMLDocsAutomation import get_yaml_obj
return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
# sanity
file_path = os.path.join('test_data', 'ANYRUN_yml.txt')
mocker.patch.object(demisto, 'getFilePath',
return_value={'path': file_path})
data = get_yaml_obj('12345')
# error count should not change
assert return_error_mock.call_count == 0
# call_args last call with a tuple of args list and kwargs
assert data['commonfields']['id'] == 'ANYRUN'
# invalid yml
mocker.patch.object(demisto, 'getFilePath',
return_value={'path': os.path.join('test_data', 'not_yml_file.txt')})
get_yaml_obj('234')
assert return_error_mock.call_count == 1
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert err_msg == 'Failed to open integration file: not a yml file'
# no such file
mocker.patch.object(demisto, 'getFilePath', side_effect=ValueError('no such file'))
get_yaml_obj('234')
assert return_error_mock.call_count == 2
# call_args last call with a tuple of args list and kwargs
err_msg = return_error_mock.call_args[0][0]
assert err_msg == 'Failed to open integration file: no such file'
def test_extract_command():
from HTMLDocsAutomation import extract_command
# no args
cmd, args = extract_command('!no-args-command')
assert cmd == '!no-args-command'
assert args == {}
# sanity
cmd, args = extract_command('!command ip=8.8.8.8')
expected = {'ip': '8.8.8.8'}
assert cmd == '!command'
assert len(expected) == len(args)
for k, v in expected.items():
assert args[k] == v
# edge cases
cmd, args = extract_command('!command SomeParam=8.8.8.8 dash-arg="args" special_chars="1qazxsw2 EW3- *3d" '
'backTick=`hello "hello" \'hello\'` triple_quotes="""this is a multi quotes"""')
expected = {
'SomeParam': '8.8.8.8',
'dash-arg': 'args',
'special_chars': '1qazxsw2 EW3- *3d',
'backTick': 'hello "hello" \'hello\'',
'triple_quotes': 'this is a multi quotes'
}
assert cmd == '!command'
assert len(expected) == len(args)
for k, v in expected.items():
assert args[k] == v
cmd, args = extract_command('!command SomeParam="""hello\nthis is multiline"""')
expected = {
'SomeParam': 'hello\nthis is multiline',
}
assert cmd == '!command'
assert len(expected) == len(args)
for k, v in expected.items():
assert args[k] == v
def test_generate_commands_section():
from HTMLDocsAutomation import generate_commands_section
yml_data = {
'script': {
'commands': [
{'deprecated': True,
'name': 'deprecated-cmd',
'description': 'desc'},
{'deprecated': False,
'name': 'non-deprecated-cmd',
'description': 'desc1'},
{'name': 'non-deprecated-cmd2',
'description': 'desc2.'}
]
}
}
section, errors = generate_commands_section(yml_data, {}, True)
expected_section = '''<h2>Commands</h2>
<p>
You can execute these commands from the Demisto CLI, as part of an automation, or in a playbook.
After you successfully execute a command, a DBot message appears in the War Room with the command details.
</p>
<ol>
<li><a href="#non-deprecated-cmd" target="_self">desc1: non-deprecated-cmd</a></li>
<li><a href="#non-deprecated-cmd2" target="_self">desc2: non-deprecated-cmd2</a></li>
</ol>
<h3 id="non-deprecated-cmd">1. non-deprecated-cmd</h3>
<hr>
<p>desc1</p>
<h5>Base Command</h5>
<p>
<code>non-deprecated-cmd</code>
</p>
<h5>Required Permissions</h5>
<p>The following permissions are required for this command.</p>
<ul>
<li>permission 1</li>
<li>permission 2</li>
</ul>
<h5>Input</h5>
There are no input arguments for this command.
<p> </p>
<h5>Context Output</h5>
There are no context output for this command.
<p> </p>
<h5>Command Example</h5>
<p>
<code> </code>
</p>
<h5>Human Readable Output</h5>
<p>
<!-- remove the following comments to manually add an image: -->
<!--
<a href="insert URL to your image" target="_blank" rel="noopener noreferrer"><img src="insert URL to your image"
alt="image" width="749" height="412"></a>
-->
</p>
<h3 id="non-deprecated-cmd2">2. non-deprecated-cmd2</h3>
<hr>
<p>desc2.</p>
<h5>Base Command</h5>
<p>
<code>non-deprecated-cmd2</code>
</p>
<h5>Required Permissions</h5>
<p>The following permissions are required for this command.</p>
<ul>
<li>permission 1</li>
<li>permission 2</li>
</ul>
<h5>Input</h5>
There are no input arguments for this command.
<p> </p>
<h5>Context Output</h5>
There are no context output for this command.
<p> </p>
<h5>Command Example</h5>
<p>
<code> </code>
</p>
<h5>Human Readable Output</h5>
<p>
<!-- remove the following comments to manually add an image: -->
<!--
<a href="insert URL to your image" target="_blank" rel="noopener noreferrer"><img src="insert URL to your image"
alt="image" width="749" height="412"></a>
-->
</p>
'''
assert section == expected_section
assert len(errors) == 2 # no example for both commands
def test_to_html_table():
from HTMLDocsAutomation import to_html_table
data = [
['hello', 'hello', 'hello'],
['world', 'world', 'world'],
['!', '!', '!'],
]
expected = '''<table style="width:750px" border="2" cellpadding="6">
<thead>
<tr>
<th><strong>header1</strong></th>
<th><strong>header2</strong></th>
<th><strong>header3</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td>hello</td>
<td>hello</td>
<td>hello</td>
</tr>
<tr>
<td>world</td>
<td>world</td>
<td>world</td>
</tr>
<tr>
<td>!</td>
<td>!</td>
<td>!</td>
</tr>
</tbody>
</table>
'''
assert to_html_table(['header1', 'header2', 'header3'], data) == expected
def test_human_readable_example_to_html():
from HTMLDocsAutomation import human_readable_example_to_html
data = [
{
'header1': 'hello',
'header2': 'hello',
},
{
'header1': 'world',
'header2': 'world',
},
]
md = tableToMarkdown('Title', data, headers=['header1', 'header2'])
expected = '''<h3>Title</h3>
<table style="width:750px" border="2" cellpadding="6">
<thead>
<tr>
<th><strong>header1</strong></th>
<th><strong>header2</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td> hello </td>
<td> hello </td>
</tr>
<tr>
<td> world </td>
<td> world </td>
</tr>
</tbody>
</table>
'''
assert human_readable_example_to_html(md) == expected
md = md + '\n# Headline\nsome text\nanother line of text\n' + md
expected = expected + '\n<h1>Headline</h1>\n<p>\nsome text\nanother line of text\n</p>\n' + expected
assert human_readable_example_to_html(md) == expected
# md = '''Key | Value
# - | -
# city | Mountain View
# country | US
# hostname | dns.google
# ip | 8.8.8.8
# loc | 37.3860,-122.0838
# org | AS15169 Google LLC
# postal | 94035
# readme | https://ipinfo.io/missingauth
# region | California
# {"lat": 37.386, "lng": -122.0838}'''
#
# print(human_readable_example_to_html(md))
|
9275a61fcd2898573e14a2a89e38f0790b56d20d
|
c5a69158ac5966d8ba8f3b2e2fb4c35d49a0658d
|
/onelinerizer/__init__.py
|
819e70addddb3290dffdeb3c906eab23eb0bcb75
|
[
"MIT"
] |
permissive
|
csvoss/onelinerizer
|
d47202733f1b6935146b05027381bea9fbde0d04
|
bad341f261d35e56872b4c22297a44dc6d5cfab3
|
refs/heads/master
| 2022-07-06T05:51:02.018368
| 2022-01-15T18:43:08
| 2022-01-15T18:43:08
| 29,375,764
| 1,126
| 102
|
MIT
| 2018-10-20T00:41:35
| 2015-01-17T01:32:08
|
Python
|
UTF-8
|
Python
| false
| false
| 38
|
py
|
__init__.py
|
from .onelinerizer import onelinerize
|
3317fe74c9c02d020ed6ce2cb00bc8c81bbb445f
|
857938ac2024b1e37f32a6631e85e179ae04b601
|
/src/python/test/test_simplex_tree_serialization.py
|
9ba78a5b9ce29bc1ccbe2c5fc9f0c91661442c81
|
[
"MIT"
] |
permissive
|
GUDHI/gudhi-devel
|
a2b08232a2ea66047b7a626d85dff0d50decc71c
|
2f76d9416e145282adcd8264438480008bd59f77
|
refs/heads/master
| 2023-08-31T13:44:17.776336
| 2023-08-29T20:20:16
| 2023-08-29T20:20:16
| 174,304,137
| 212
| 69
|
MIT
| 2023-09-14T15:34:48
| 2019-03-07T08:34:04
|
C++
|
UTF-8
|
Python
| false
| false
| 747
|
py
|
test_simplex_tree_serialization.py
|
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
Copyright (C) 2023 Inria
Modification(s):
- YYYY/MM Author: Description of the modification
"""
from gudhi import SimplexTree
import numpy as np
import pickle
import pytest
def test_pickle_simplex_tree():
st = SimplexTree.create_from_array(np.random.rand(10, 10))
for dim in [1, 2, 3]:
st.expansion(dim)
with open('stree.pkl','wb') as f:
pickle.dump(st, f)
with open('stree.pkl','rb') as f:
st_copy = pickle.load(f)
assert st == st_copy
|
4d1b30493599806c44c45eaff7580e650510e392
|
cad91ae76d2746a6c28ddda0f33a58f9d461378f
|
/TensorFlow/Detection/SSD/models/research/object_detection/builders/dataset_builder.py
|
b71c054a739a3c137d267b3d4b2dde8078c5d0b0
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DeepLearningExamples
|
fe677521e7e2a16e3cb0b77e358f9aab72f8c11a
|
a5388a45f71a949639b35cc5b990bd130d2d8164
|
refs/heads/master
| 2023-08-31T20:57:08.798455
| 2023-08-23T10:09:12
| 2023-08-23T10:09:12
| 131,881,622
| 11,838
| 3,124
| null | 2023-08-28T16:57:33
| 2018-05-02T17:04:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,836
|
py
|
dataset_builder.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tf.data.Dataset builder.
Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.
Note: If users wish to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""
import functools
import tensorflow as tf
import horovod.tensorflow as hvd
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2
def make_initializable_iterator(dataset):
"""Creates an iterator, and initializes tables.
This is useful in cases where make_one_shot_iterator wouldn't work because
the graph contains a hash table that needs to be initialized.
Args:
dataset: A `tf.data.Dataset` object.
Returns:
A `tf.data.Iterator`.
"""
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
return iterator
def read_dataset(file_read_func, input_files, config):
"""Reads a dataset, and handles repetition and shuffling.
Args:
file_read_func: Function to use in tf.contrib.data.parallel_interleave, to
read every individual file into a tf.data.Dataset.
input_files: A list of file paths to read.
    config: An input_reader_builder.InputReader object.
Returns:
A tf.data.Dataset of (undecoded) tf-records based on config.
"""
# Shard, shuffle, and read files.
filenames = tf.gfile.Glob(input_files)
if not filenames:
raise ValueError('Invalid input path specified in '
'`input_reader_config`.')
num_readers = config.num_readers
if num_readers > len(filenames):
num_readers = len(filenames)
tf.logging.warning('num_readers has been reduced to %d to match input file '
'shards.' % num_readers)
filename_dataset = tf.data.Dataset.from_tensor_slices(filenames)
if config.shuffle:
filename_dataset = filename_dataset.shuffle(
config.filenames_shuffle_buffer_size)
elif num_readers > 1:
tf.logging.warning('`shuffle` is false, but the input data stream is '
'still slightly shuffled since `num_readers` > 1.')
filename_dataset = filename_dataset.repeat(config.num_epochs or None)
records_dataset = filename_dataset.apply(
tf.contrib.data.parallel_interleave(
file_read_func,
cycle_length=num_readers,
block_length=config.read_block_length,
sloppy=config.shuffle))
if config.shuffle:
records_dataset = records_dataset.shuffle(config.shuffle_buffer_size)
return records_dataset
def build(input_reader_config, batch_size=None, transform_input_data_fn=None, multi_gpu=True):
"""Builds a tf.data.Dataset.
Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all
records. Applies a padded batch to the resulting dataset.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
batch_size: Batch size. If batch size is None, no batching is performed.
transform_input_data_fn: Function to apply transformation to all records,
      or None if no extra decoding is required.
    multi_gpu: Whether to shard the dataset across Horovod workers, one shard
      per rank. Defaults to True.
Returns:
A tf.data.Dataset based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(input_reader_config, input_reader_pb2.InputReader):
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
config = input_reader_config.tf_record_input_reader
if not config.input_path:
raise ValueError('At least one input path must be specified in '
'`input_reader_config`.')
label_map_proto_file = None
if input_reader_config.HasField('label_map_path'):
label_map_proto_file = input_reader_config.label_map_path
decoder = tf_example_decoder.TfExampleDecoder(
load_instance_masks=input_reader_config.load_instance_masks,
instance_mask_type=input_reader_config.mask_type,
label_map_proto_file=label_map_proto_file,
use_display_name=input_reader_config.use_display_name,
num_additional_channels=input_reader_config.num_additional_channels)
def process_fn(value):
"""Sets up tf graph that decodes, transforms and pads input data."""
processed_tensors = decoder.decode(value)
if transform_input_data_fn is not None:
processed_tensors = transform_input_data_fn(processed_tensors)
return processed_tensors
dataset = read_dataset(
functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
config.input_path[:], input_reader_config)
if multi_gpu:
dataset = dataset.shard(hvd.size(), hvd.rank())
# TODO(rathodv): make batch size a required argument once the old binaries
# are deleted.
if batch_size:
num_parallel_calls = batch_size * input_reader_config.num_parallel_batches
else:
num_parallel_calls = input_reader_config.num_parallel_map_calls
dataset = dataset.map(
process_fn,
num_parallel_calls=num_parallel_calls)
if batch_size:
dataset = dataset.apply(
tf.contrib.data.batch_and_drop_remainder(batch_size))
dataset = dataset.prefetch(input_reader_config.num_prefetch_batches)
return dataset
raise ValueError('Unsupported input_reader_config.')
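# Usage sketch (hedged; `input_reader_config` is assumed to be a populated
# input_reader_pb2.InputReader proto): a typical caller builds the dataset and
# drives it through an initializable iterator. multi_gpu=False is used here so
# the sketch does not require hvd.init() to have been called:
#
#     dataset = build(input_reader_config, batch_size=32, multi_gpu=False)
#     iterator = make_initializable_iterator(dataset)
#     tensors = iterator.get_next()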
|
53207fbb49532c4da6d3d8c8a185a800fbe6a3f7
|
3bcc6467f68abaa8dca7a8c2a544875df42e4164
|
/fastai/tabular/transform.py
|
d7bc255eaf5fd92467b9db28e67590c4981e4356
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
jantic/DeOldify
|
82cd4a520b21ebe12ac7f587297001c4ad7ebc56
|
141e873e42eb5e40665d20349f4b8e9a267ba1c4
|
refs/heads/master
| 2023-08-09T07:20:08.480890
| 2023-04-16T16:39:31
| 2023-04-16T16:39:31
| 155,636,246
| 17,137
| 2,637
|
MIT
| 2023-07-21T02:35:25
| 2018-10-31T23:32:34
|
Python
|
UTF-8
|
Python
| false
| false
| 9,761
|
py
|
transform.py
|
"Cleaning and feature engineering functions for structured data"
from ..torch_core import *
from pandas.api.types import is_numeric_dtype
from datetime import date, datetime
import calendar
__all__ = ['add_datepart', 'cont_cat_split', 'Categorify', 'FillMissing', 'FillStrategy', 'Normalize', 'TabularProc',
'add_elapsed_times', 'make_date', 'add_cyclic_datepart']
def make_date(df:DataFrame, date_field:str):
"Make sure `df[field_name]` is of the right date type."
field_dtype = df[date_field].dtype
if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
field_dtype = np.datetime64
if not np.issubdtype(field_dtype, np.datetime64):
df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)
def cyclic_dt_feat_names(time:bool=True, add_linear:bool=False)->List[str]:
"Return feature names of date/time cycles as produced by `cyclic_dt_features`."
fs = ['cos','sin']
attr = [f'{r}_{f}' for r in 'weekday day_month month_year day_year'.split() for f in fs]
if time: attr += [f'{r}_{f}' for r in 'hour clock min sec'.split() for f in fs]
if add_linear: attr.append('year_lin')
return attr
def cyclic_dt_features(d:Union[date,datetime], time:bool=True, add_linear:bool=False)->List[float]:
"Calculate the cos and sin of date/time cycles."
tt,fs = d.timetuple(), [np.cos, np.sin]
day_year,days_month = tt.tm_yday, calendar.monthrange(d.year, d.month)[1]
days_year = 366 if calendar.isleap(d.year) else 365
rs = d.weekday()/7, (d.day-1)/days_month, (d.month-1)/12, (day_year-1)/days_year
feats = [f(r * 2 * np.pi) for r in rs for f in fs]
if time and isinstance(d, datetime) and type(d) != date:
rs = tt.tm_hour/24, tt.tm_hour%12/12, tt.tm_min/60, tt.tm_sec/60
feats += [f(r * 2 * np.pi) for r in rs for f in fs]
if add_linear:
if type(d) == date: feats.append(d.year + rs[-1])
else:
secs_in_year = (datetime(d.year+1, 1, 1) - datetime(d.year, 1, 1)).total_seconds()
feats.append(d.year + ((d - datetime(d.year, 1, 1)).total_seconds() / secs_in_year))
return feats
def add_cyclic_datepart(df:DataFrame, field_name:str, prefix:str=None, drop:bool=True, time:bool=False, add_linear:bool=False):
"Helper function that adds trigonometric date/time features to a date in the column `field_name` of `df`."
make_date(df, field_name)
field = df[field_name]
prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name))
series = field.apply(partial(cyclic_dt_features, time=time, add_linear=add_linear))
columns = [prefix + c for c in cyclic_dt_feat_names(time, add_linear)]
df_feats = pd.DataFrame([item for item in series], columns=columns, index=series.index)
for column in columns: df[column] = df_feats[column]
if drop: df.drop(field_name, axis=1, inplace=True)
return df
def add_datepart(df:DataFrame, field_name:str, prefix:str=None, drop:bool=True, time:bool=False):
"Helper function that adds columns relevant to a date in the column `field_name` of `df`."
make_date(df, field_name)
field = df[field_name]
prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name))
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start',
'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[prefix + n] = getattr(field.dt, n.lower())
df[prefix + 'Elapsed'] = field.astype(np.int64) // 10 ** 9
if drop: df.drop(field_name, axis=1, inplace=True)
return df
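# Usage sketch (hedged; the frame and column name are made up):
#     df = pd.DataFrame({'date': ['2019-12-04', '2019-11-29']})
#     add_datepart(df, 'date')
#     # with field_name 'date' the prefix resolves to '', so columns named
#     # Year, Month, Week, Day, ..., Elapsed are added and 'date' is dropped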
def _get_elapsed(df:DataFrame,field_names:Collection[str], date_field:str, base_field:str, prefix:str):
for f in field_names:
day1 = np.timedelta64(1, 'D')
last_date,last_base,res = np.datetime64(),None,[]
for b,v,d in zip(df[base_field].values, df[f].values, df[date_field].values):
if last_base is None or b != last_base:
last_date,last_base = np.datetime64(),b
if v: last_date = d
res.append(((d-last_date).astype('timedelta64[D]') / day1))
df[prefix + f] = res
return df
def add_elapsed_times(df:DataFrame, field_names:Collection[str], date_field:str, base_field:str):
field_names = listify(field_names)
    # Make sure date_field is a date and base_field a bool
df[field_names] = df[field_names].astype('bool')
make_date(df, date_field)
work_df = df[field_names + [date_field, base_field]]
work_df = work_df.sort_values([base_field, date_field])
work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'After')
work_df = work_df.sort_values([base_field, date_field], ascending=[True, False])
work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'Before')
for a in ['After' + f for f in field_names] + ['Before' + f for f in field_names]:
work_df[a] = work_df[a].fillna(0).astype(int)
for a,s in zip([True, False], ['_bw', '_fw']):
work_df = work_df.set_index(date_field)
tmp = (work_df[[base_field] + field_names].sort_index(ascending=a)
.groupby(base_field).rolling(7, min_periods=1).sum())
tmp.drop(base_field,1,inplace=True)
tmp.reset_index(inplace=True)
work_df.reset_index(inplace=True)
work_df = work_df.merge(tmp, 'left', [date_field, base_field], suffixes=['', s])
work_df.drop(field_names,1,inplace=True)
return df.merge(work_df, 'left', [date_field, base_field])
def cont_cat_split(df, max_card=20, dep_var=None)->Tuple[List,List]:
"Helper function that returns column names of cont and cat variables from given df."
cont_names, cat_names = [], []
for label in df:
if label == dep_var: continue
if df[label].dtype == int and df[label].unique().shape[0] > max_card or df[label].dtype == float: cont_names.append(label)
else: cat_names.append(label)
return cont_names, cat_names
@dataclass
class TabularProc():
"A processor for tabular dataframes."
cat_names:StrList
cont_names:StrList
def __call__(self, df:DataFrame, test:bool=False):
"Apply the correct function to `df` depending on `test`."
func = self.apply_test if test else self.apply_train
func(df)
def apply_train(self, df:DataFrame):
"Function applied to `df` if it's the train set."
raise NotImplementedError
def apply_test(self, df:DataFrame):
"Function applied to `df` if it's the test set."
self.apply_train(df)
class Categorify(TabularProc):
"Transform the categorical variables to that type."
def apply_train(self, df:DataFrame):
"Transform `self.cat_names` columns in categorical."
self.categories = {}
for n in self.cat_names:
df.loc[:,n] = df.loc[:,n].astype('category').cat.as_ordered()
self.categories[n] = df[n].cat.categories
def apply_test(self, df:DataFrame):
"Transform `self.cat_names` columns in categorical using the codes decided in `apply_train`."
for n in self.cat_names:
df.loc[:,n] = pd.Categorical(df[n], categories=self.categories[n], ordered=True)
FillStrategy = IntEnum('FillStrategy', 'MEDIAN COMMON CONSTANT')
@dataclass
class FillMissing(TabularProc):
"Fill the missing values in continuous columns."
fill_strategy:FillStrategy=FillStrategy.MEDIAN
add_col:bool=True
fill_val:float=0.
def apply_train(self, df:DataFrame):
"Fill missing values in `self.cont_names` according to `self.fill_strategy`."
self.na_dict = {}
for name in self.cont_names:
if pd.isnull(df[name]).sum():
if self.add_col:
df[name+'_na'] = pd.isnull(df[name])
if name+'_na' not in self.cat_names: self.cat_names.append(name+'_na')
if self.fill_strategy == FillStrategy.MEDIAN: filler = df[name].median()
elif self.fill_strategy == FillStrategy.CONSTANT: filler = self.fill_val
else: filler = df[name].dropna().value_counts().idxmax()
df[name] = df[name].fillna(filler)
self.na_dict[name] = filler
def apply_test(self, df:DataFrame):
"Fill missing values in `self.cont_names` like in `apply_train`."
for name in self.cont_names:
if name in self.na_dict:
if self.add_col:
df[name+'_na'] = pd.isnull(df[name])
if name+'_na' not in self.cat_names: self.cat_names.append(name+'_na')
df[name] = df[name].fillna(self.na_dict[name])
elif pd.isnull(df[name]).sum() != 0:
raise Exception(f"""There are nan values in field {name} but there were none in the training set.
Please fix those manually.""")
class Normalize(TabularProc):
"Normalize the continuous variables."
def apply_train(self, df:DataFrame):
"Compute the means and stds of `self.cont_names` columns to normalize them."
self.means,self.stds = {},{}
for n in self.cont_names:
assert is_numeric_dtype(df[n]), (f"""Cannot normalize '{n}' column as it isn't numerical.
Are you sure it doesn't belong in the categorical set of columns?""")
self.means[n],self.stds[n] = df[n].mean(),df[n].std()
df[n] = (df[n]-self.means[n]) / (1e-7 + self.stds[n])
def apply_test(self, df:DataFrame):
"Normalize `self.cont_names` with the same statistics as in `apply_train`."
for n in self.cont_names:
df[n] = (df[n]-self.means[n]) / (1e-7 + self.stds[n])
|
ece677ecdddd910127494826a5515ac6a342b119
|
0e0ddc095823c54877c143adacbfcdd6355261de
|
/libqtile/widget/countdown.py
|
90915477edd88739d85cdcf680ab1b889dc71027
|
[
"MIT"
] |
permissive
|
qtile/qtile
|
b19108ca632871104a0783a4afbe7350a17b97db
|
3f8a00082ad880042d396477d9445954e8d29cf2
|
refs/heads/master
| 2023-09-01T19:31:09.419767
| 2023-09-01T19:10:00
| 2023-09-01T19:10:00
| 47,476
| 4,203
| 986
|
MIT
| 2023-09-11T21:21:56
| 2008-08-30T00:16:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
countdown.py
|
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 roger
# Copyright (c) 2014 Tycho Andersen
# Copyright (c) 2014 Adi Sieker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
from libqtile.widget import base
class Countdown(base.InLoopPollText):
"""A simple countdown timer text widget"""
defaults = [
(
"format",
"{D}d {H}h {M}m {S}s",
"Format of the displayed text. Available variables:"
"{D} == days, {H} == hours, {M} == minutes, {S} seconds.",
),
("update_interval", 1.0, "Update interval in seconds for the clock"),
("date", datetime.now(), "The datetime for the end of the countdown"),
]
def __init__(self, **config):
base.InLoopPollText.__init__(self, **config)
self.add_defaults(Countdown.defaults)
def poll(self):
now = datetime.now()
days = hours = minutes = seconds = 0
if not self.date < now:
delta = self.date - now
days = delta.days
hours, rem = divmod(delta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
data = {
"D": "%02d" % days,
"H": "%02d" % hours,
"M": "%02d" % minutes,
"S": "%02d" % seconds,
}
return self.format.format(**data)
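# Configuration sketch (hedged; the target date and bar placement are
# assumptions, not part of this module):
#
#     from datetime import datetime
#     from libqtile import widget
#     widget.Countdown(date=datetime(2030, 1, 1), format='{D}d {H}h {M}m {S}s')
#
# The instance goes into a bar.Bar([...]) widget list like any other
# InLoopPollText widget and re-polls every `update_interval` seconds.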
|
8d43a82a2f85d9de3fdf789c38ec585ce839d1d5
|
2b5ffa18e7198e45fa77674b96dac8d91159fed7
|
/djangae/contrib/locking/admin.py
|
404efa8382b32e384caa19ce7c6cc19b6c45adbc
|
[
"BSD-3-Clause"
] |
permissive
|
potatolondon/djangae
|
73681d0c8302ac216f74bc00b980de368e8d4280
|
bef308632790bb6f87e71bb91183f57bad6bd149
|
refs/heads/master
| 2023-09-01T15:27:51.995232
| 2023-08-30T14:40:48
| 2023-08-30T14:40:48
| 10,217,788
| 474
| 155
|
BSD-3-Clause
| 2023-02-08T01:05:31
| 2013-05-22T10:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 106
|
py
|
admin.py
|
from django.contrib import admin
from .models import DatastoreLock
admin.site.register(DatastoreLock)
|
b7fc38352e74a9feef42b7e92207b28ca7659157
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/update-server/otupdate/common/ssh_key_management.py
|
64ad03210de417beafa1b667399e3f8479b94899
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 7,213
|
py
|
ssh_key_management.py
|
"""
ssh_key_management: Endpoints for managing SSH keys on the robot
"""
import contextlib
import functools
import hashlib
import ipaddress
import logging
import os
from typing import (
Any,
Generator,
IO,
List,
Tuple,
)
from aiohttp import web
from .handler_type import Handler
LOG = logging.getLogger(__name__)
def require_linklocal(handler: Handler) -> Handler:
"""Ensure the decorated is only called if the request is linklocal.
The host ip address should be in the X-Host-IP header (provided by nginx)
"""
@functools.wraps(handler)
async def decorated(request: web.Request) -> web.Response:
ipaddr_str = request.headers.get("x-host-ip")
invalid_req_data = {
"error": "bad-interface",
"message": (
f"The endpoint {request.rel_url}"
f" can only be used from link-local connections."
f" Make sure you're connected to this robot directly by cable"
f" and using this robot's wired IP address"
f" (not its wireless IP address)."
),
}
if not ipaddr_str:
return web.json_response( # type: ignore[no-untyped-call,no-any-return]
data=invalid_req_data, status=403
)
try:
addr = ipaddress.ip_address(ipaddr_str)
except ValueError:
LOG.exception(f"Couldn't parse host ip address {ipaddr_str}")
raise
if not addr.is_link_local:
return web.json_response( # type: ignore[no-untyped-call,no-any-return]
data=invalid_req_data, status=403
)
return await handler(request)
return decorated
@contextlib.contextmanager
def authorized_keys(mode: str = "r") -> Generator[IO[Any], None, None]:
"""Open the authorized_keys file. Separate function for mocking.
:param mode: As :py:meth:`open`
"""
path = "/var/home/.ssh/authorized_keys"
if not os.path.exists(path):
os.makedirs(os.path.dirname(path))
open(path, "w").close()
with open(path, mode) as ak:
yield ak
def get_keys() -> List[Tuple[str, str]]:
"""Return a list of tuples of [md5(pubkey), pubkey]"""
with authorized_keys() as ak:
return [
(hashlib.new("md5", line.encode()).hexdigest(), line)
for line in ak.read().split("\n")
if line.strip()
]
def remove_by_hash(hashval: str) -> None:
"""Remove the key whose md5 sum matches hashval.
:raises: KeyError if the hashval wasn't found
"""
    key_details = get_keys()
    # complain first so a missing key does not rewrite the file for nothing
    if not key_present(hashval):
        raise KeyError(hashval)
    with authorized_keys("w") as ak:
        # write back every key except the one whose md5 matches hashval
        for keyhash, key in key_details:
            if keyhash != hashval:
                ak.write(f"{key}\n")
def key_present(hashval: str) -> bool:
"""Check if the key whose md5 is hashval is in authorized_keys
:returns: ``True`` if the key is present, ``False`` otherwise
"""
return hashval in [keyhash for keyhash, _ in get_keys()]
@require_linklocal
async def list_keys(request: web.Request) -> web.Response:
"""List keys in the authorized_keys file.
GET /server/ssh_keys
-> 200 OK {"public_keys": [{"key_md5": md5 hex digest, "key": key string}]}
(or 403 if not from the link-local connection)
"""
return web.json_response( # type: ignore[no-untyped-call,no-any-return]
{
"public_keys": [
{"key_md5": details[0], "key": details[1]} for details in get_keys()
]
},
status=200,
)
@require_linklocal
async def add(request: web.Request) -> web.Response:
"""Add a public key to the authorized_keys file.
POST /server/ssh_keys {"key": key string}
-> 201 Created
If the key string doesn't look like an openssh public key, rejects with 400
"""
def key_error(error: str, message: str) -> web.Response:
return web.json_response( # type: ignore[no-untyped-call,no-any-return]
data={"error": error, "message": message}, status=400
)
body = await request.json()
if "key" not in body or not isinstance(body["key"], str):
return key_error("no-key", 'No "key" element in body')
pubkey = body["key"]
# Do some fairly minor sanitization; dropbear will ignore invalid keys but
# we still don’t want to have a bunch of invalid data in there
pubkey_parts = pubkey.split()
if len(pubkey_parts) == 0:
return key_error("bad-key", "Key is empty")
alg = pubkey_parts[0]
# We don’t allow dss so this has to be rsa or ecdsa and shouldn’t start
# with restrictions
if alg != "ssh-rsa" and not alg.startswith("ecdsa"):
LOG.warning(f"weird keyfile uploaded: starts with {alg}")
return key_error("bad-key", f"Key starts with invalid algorithm {alg}")
if "\n" in pubkey[:-1]:
LOG.warning("Newlines in keyfile that shouldn't be there")
return key_error("bad-key", "Key has a newline")
# This is a more or less correct key we can write
if "\n" == pubkey[-1]:
pubkey = pubkey[:-1]
hashval = hashlib.new("md5", pubkey.encode()).hexdigest()
if not key_present(hashval):
with authorized_keys("a") as ak:
ak.write(f"{pubkey}\n")
return web.json_response( # type: ignore[no-untyped-call,no-any-return]
data={"message": f"Added key {hashval}", "key_md5": hashval}, status=201
)
@require_linklocal
async def clear(request: web.Request) -> web.Response:
"""Clear all public keys from authorized_keys
DELETE /server/ssh_keys
-> 200 OK if successful
(or 403 if not from the link-local connection)
"""
with authorized_keys("w") as ak:
ak.write("\n".join([]) + "\n")
return web.json_response( # type: ignore[no-untyped-call,no-any-return]
data={
"message": "Keys cleared. " "Restart robot to take effect",
"restart_url": "/server/restart",
},
status=200,
)
@require_linklocal
async def remove(request: web.Request) -> web.Response:
"""Remove a public key from authorized_keys
DELETE /server/ssh_keys/:key_md5_hexdigest
-> 200 OK if the key was found
-> 404 Not Found otherwise
"""
requested_hash = request.match_info["key_md5"]
new_keys: List[str] = []
found = False
for keyhash, key in get_keys():
if keyhash == requested_hash:
found = True
else:
new_keys.append(key)
if not found:
return web.json_response( # type: ignore[no-untyped-call,no-any-return]
data={
"error": "invalid-key-hash",
"message": f"No such key md5 {requested_hash}",
},
status=404,
)
with authorized_keys("w") as ak:
ak.write("\n".join(new_keys) + "\n")
return web.json_response( # type: ignore[no-untyped-call,no-any-return]
data={
"message": f"Key {requested_hash} deleted. " "Restart robot to take effect",
"restart_url": "/server/restart",
},
status=200,
)
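# Request sketch (hedged; host and port are placeholders, not defined here):
#
#     curl http://<wired-ip>:<update-server-port>/server/ssh_keys
#     curl -X POST -H 'Content-Type: application/json' \
#          -d '{"key": "ssh-rsa AAAA... user@host"}' \
#          http://<wired-ip>:<update-server-port>/server/ssh_keys
#
# Both calls must arrive over the link-local wired interface; otherwise
# require_linklocal rejects them with HTTP 403.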
|
2d5ccfc1c0cd6c6748227f3d6c70180752f1a96b
|
f2034c76a11ce6296131d2bab89a5dae7d59edfe
|
/python/dllib/src/bigdl/dllib/feature/dataset/movielens.py
|
c798769af64e0ff32e0108cfad0b37cf1fd82996
|
[
"Apache-2.0"
] |
permissive
|
intel-analytics/BigDL
|
e22cd917eecc7340bda3df4356acba0623a62ef6
|
4ffa012a426e0d16ed13b707b03d8787ddca6aa4
|
refs/heads/main
| 2023-08-22T06:31:37.923091
| 2023-08-22T02:58:42
| 2023-08-22T02:58:42
| 66,823,715
| 4,913
| 1,327
|
Apache-2.0
| 2023-09-14T10:41:50
| 2016-08-29T07:59:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
movielens.py
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import zipfile
import numpy as np
from bigdl.dllib.feature.dataset import base
SOURCE_URL = 'http://files.grouplens.org/datasets/movielens/'
def read_data_sets(data_dir):
"""
    Parse or download movielens 1m data if data_dir is empty.
:param data_dir: The directory storing the movielens data
:return: a 2D numpy array with user index and item index in each row
"""
WHOLE_DATA = 'ml-1m.zip'
local_file = base.maybe_download(WHOLE_DATA, data_dir, SOURCE_URL + WHOLE_DATA)
zip_ref = zipfile.ZipFile(local_file, 'r')
extracted_to = os.path.join(data_dir, "ml-1m")
if not os.path.exists(extracted_to):
print("Extracting %s to %s" % (local_file, data_dir))
zip_ref.extractall(data_dir)
zip_ref.close()
rating_files = os.path.join(extracted_to, "ratings.dat")
rating_list = [i.strip().split("::") for i in open(rating_files, "r").readlines()]
movielens_data = np.array(rating_list).astype(int)
return movielens_data
def get_id_pairs(data_dir):
movielens_data = read_data_sets(data_dir)
return movielens_data[:, 0:2]
def get_id_ratings(data_dir):
movielens_data = read_data_sets(data_dir)
return movielens_data[:, 0:3]
if __name__ == "__main__":
movielens_data = read_data_sets("/tmp/movielens/")
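# Usage sketch (hedged): get_id_ratings() keeps the first three '::'-separated
# fields of ratings.dat, so each row is (user_id, movie_id, rating):
#
#     id_ratings = get_id_ratings("/tmp/movielens/")   # shape (num_ratings, 3)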
|
d0e44a8cdd8c1f11566dd014a6c31c8f04768efb
|
f2d478767381afe660940cb94f760f476b977d74
|
/natasha/const.py
|
b9393b0a91a571055ee86497bfc18e8fd068bf0e
|
[
"MIT"
] |
permissive
|
natasha/natasha
|
7b434d797a76d740fed8133b4b0241a9096f794b
|
ebe33ff1be333af0059aa34da825dfd4abb15dc2
|
refs/heads/master
| 2023-08-02T08:37:09.681688
| 2023-07-24T10:07:57
| 2023-07-24T10:07:57
| 64,834,778
| 1,013
| 107
|
MIT
| 2023-07-24T10:13:22
| 2016-08-03T09:49:51
|
Python
|
UTF-8
|
Python
| false
| false
| 37
|
py
|
const.py
|
PER = 'PER'
LOC = 'LOC'
ORG = 'ORG'
|
796e38cc575a37fa7993ec3d7075ffb81e10a0d9
|
a9ed175e2cf09ea29fe72b90dc4186ff9515c9b0
|
/tests/integration/suite/test_istio.py
|
049428186e6d2f3f8a08a29c951a9753433ed6d3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
rancher/rancher
|
96d67163506fa4ab219dca4d86d9bfebacb68a6d
|
90fc68836fd8d1bd2996283500f954ace40203ba
|
refs/heads/release/v2.8
| 2023-08-29T06:23:12.480919
| 2023-08-28T21:35:36
| 2023-08-28T21:35:36
| 26,337,322
| 21,773
| 3,508
|
Apache-2.0
| 2023-09-14T19:51:31
| 2014-11-07T20:49:31
|
Go
|
UTF-8
|
Python
| false
| false
| 5,482
|
py
|
test_istio.py
|
import os
import pytest
import subprocess
from .common import random_str
from .conftest import cluster_and_client, ClusterContext
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
istio_crd_url = "https://raw.githubusercontent.com/istio/istio/1.1.5" \
"/install/kubernetes/helm/istio-init/files/crd-10.yaml"
@pytest.mark.skip(reason='skipping for now, enable with istio 1.22 fix')
@pytest.mark.nonparallel
def test_virtual_service(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
client.create_virtualService(
name=name,
namespaceId=ns.id,
hosts=["test"],
http=[{
"route": [
{
"destination": {
"host": "test",
"subset": "v1"
}
}
]
}],
)
virtualServices = client.list_virtualService(
namespaceId=ns.id
)
assert len(virtualServices) == 1
client.delete(virtualServices.data[0])
client.delete(ns)
@pytest.mark.skip(reason='skipping for now, enable with istio 1.22 fix')
@pytest.mark.nonparallel
def test_destination_rule(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
client.create_destinationRule(
name=name,
namespaceId=ns.id,
host="test",
subsets=[{
"name": "v1",
"labels": {
"version": "v1",
}
}],
)
destinationRules = client.list_destinationRule(
namespaceId=ns.id
)
assert len(destinationRules) == 1
client.delete(destinationRules.data[0])
client.delete(ns)
# consistentHash has a "oneOf" only openAPI validation on it,
# and our types were passing multiple options which failed.
# This test ensures you can pass a single option.
# See: https://github.com/rancher/rancher/issues/25515
@pytest.mark.skip(reason='skipping for now, enable with istio 1.22 fix')
@pytest.mark.nonparallel
def test_destination_rule_on_cookie(admin_pc, remove_resource):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
remove_resource(ns)
name = random_str()
cookie_name = name + "_cookie"
dr = client.create_destinationRule(
name=name,
namespaceId=ns.id,
host="test",
subsets=[{
"name": "v1",
"labels": {
"version": "v1",
}
}],
trafficPolicy={
"loadBalancer": {
"consistentHash": {
"httpCookie": {
"ttl": "0s",
"name": cookie_name,
}
}
}
}
)
remove_resource(dr)
destinationRules = client.list_destinationRule(
namespaceId=ns.id
)
assert len(destinationRules) == 1
assert destinationRules.data[0].trafficPolicy.loadBalancer\
.consistentHash.httpCookie.name == cookie_name
@pytest.mark.skip(reason='skipping for now, enable with istio 1.22 fix')
@pytest.mark.nonparallel
def test_gateway(admin_pc):
client = admin_pc.client
ns = admin_pc.cluster.client.create_namespace(
name=random_str(),
projectId=admin_pc.project.id)
name = random_str()
client.create_gateway(
name=name,
namespaceId=ns.id,
servers=[{
"hosts": [
"*",
],
"port": {
"number": 443,
"name": "https",
"protocol": "HTTPS",
},
"tls": {
"mode": "SIMPLE",
"serverCertificate": "/etc/certs/server.pem",
"privateKey": "/etc/certs/privatekey.pem",
}
}],
)
gateways = client.list_gateway(
namespaceId=ns.id
)
assert len(gateways) == 1
client.delete(gateways.data[0])
client.delete(ns)
@pytest.fixture(scope='module', autouse=True)
def install_crd(admin_mc):
cluster, client = cluster_and_client('local', admin_mc.client)
cc = ClusterContext(admin_mc, cluster, client)
create_kubeconfig(cc.cluster)
try:
return subprocess.check_output(
'kubectl apply ' +
' --kubeconfig ' + kube_fname +
' -f ' + istio_crd_url,
stderr=subprocess.STDOUT, shell=True,
)
except subprocess.CalledProcessError as err:
print('kubectl error: ' + str(err.output))
raise err
def teardown_module(module):
try:
return subprocess.check_output(
'kubectl delete ' +
' --kubeconfig ' + kube_fname +
' -f ' + istio_crd_url,
stderr=subprocess.STDOUT, shell=True,
)
except subprocess.CalledProcessError as err:
print('kubectl error: ' + str(err.output))
raise err
def create_kubeconfig(cluster):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(kube_fname, "w")
file.write(generateKubeConfigOutput.config)
file.close()
|
c0f971a88b7bb01c06bbf5d7f5953a9c3f26f39f
|
84724b34b3f1e84dc53cbca5f3660590dbc34a9f
|
/nova/api/validation/validators.py
|
b0e9478d35e268d2a531b74483065dee4a7133fc
|
[
"Apache-2.0"
] |
permissive
|
openstack/nova
|
2c24b64e3677595611715bae6dda14edd3f90a24
|
065c5906d2da3e2bb6eeb3a7a15d4cd8d98b35e9
|
refs/heads/master
| 2023-08-28T15:10:05.126314
| 2023-08-25T20:31:27
| 2023-08-25T20:31:27
| 790,031
| 2,287
| 2,320
|
Apache-2.0
| 2023-07-08T02:10:29
| 2010-07-22T02:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 13,318
|
py
|
validators.py
|
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Internal implementation of request Body validating middleware.
"""
import re
import string
import jsonschema
from jsonschema import exceptions as jsonschema_exc
import netaddr
from oslo_serialization import base64
from oslo_utils import timeutils
from oslo_utils import uuidutils
import rfc3986
from nova.api.validation import parameter_types
from nova import exception
from nova.i18n import _
@jsonschema.FormatChecker.cls_checks('regex')
def _validate_regex_format(instance):
if not instance or not isinstance(instance, str):
return False
try:
re.compile(instance)
except re.error:
return False
return True
@jsonschema.FormatChecker.cls_checks('date-time')
def _validate_datetime_format(instance):
try:
timeutils.parse_isotime(instance)
except ValueError:
return False
else:
return True
@jsonschema.FormatChecker.cls_checks('base64')
def _validate_base64_format(instance):
try:
if isinstance(instance, str):
instance = instance.encode('utf-8')
base64.decode_as_bytes(instance)
except TypeError:
# The name must be string type. If instance isn't string type, the
# TypeError will be raised at here.
return False
return True
@jsonschema.FormatChecker.cls_checks('cidr')
def _validate_cidr_format(cidr):
try:
netaddr.IPNetwork(cidr)
except netaddr.AddrFormatError:
return False
if '/' not in cidr:
return False
if re.search(r'\s', cidr):
return False
return True
@jsonschema.FormatChecker.cls_checks('uuid')
def _validate_uuid_format(instance):
return uuidutils.is_uuid_like(instance)
@jsonschema.FormatChecker.cls_checks('uri')
def _validate_uri(instance):
uri = rfc3986.uri_reference(instance)
validator = rfc3986.validators.Validator().require_presence_of(
'scheme', 'host',
).check_validity_of(
'scheme', 'userinfo', 'host', 'path', 'query', 'fragment',
)
try:
validator.validate(uri)
except rfc3986.exceptions.RFC3986Exception:
return False
return True
@jsonschema.FormatChecker.cls_checks('name_with_leading_trailing_spaces',
exception.InvalidName)
def _validate_name_with_leading_trailing_spaces(instance):
regex = parameter_types.valid_name_leading_trailing_spaces_regex
try:
if re.search(regex.regex, instance):
return True
except TypeError:
# The name must be string type. If instance isn't string type, the
# TypeError will be raised at here.
pass
raise exception.InvalidName(reason=regex.reason)
@jsonschema.FormatChecker.cls_checks('name', exception.InvalidName)
def _validate_name(instance):
regex = parameter_types.valid_name_regex
try:
if re.search(regex.regex, instance):
return True
except TypeError:
# The name must be string type. If instance isn't string type, the
# TypeError will be raised at here.
pass
raise exception.InvalidName(reason=regex.reason)
@jsonschema.FormatChecker.cls_checks('az_name_with_leading_trailing_spaces',
exception.InvalidName)
def _validate_az_name_with_leading_trailing_spaces(instance):
regex = parameter_types.valid_az_name_leading_trailing_spaces_regex
try:
if re.search(regex.regex, instance):
return True
except TypeError:
# The name must be string type. If instance isn't string type, the
# TypeError will be raised at here.
pass
raise exception.InvalidName(reason=regex.reason)
@jsonschema.FormatChecker.cls_checks('az_name', exception.InvalidName)
def _validate_az_name(instance):
regex = parameter_types.valid_az_name_regex
try:
if re.search(regex.regex, instance):
return True
except TypeError:
# The name must be string type. If instance isn't string type, the
# TypeError will be raised at here.
pass
raise exception.InvalidName(reason=regex.reason)
@jsonschema.FormatChecker.cls_checks('keypair_name_20',
exception.InvalidName)
def _validate_keypair_name_20(keypair_name):
safe_chars = "_- " + string.digits + string.ascii_letters
return _validate_keypair_name(keypair_name, safe_chars)
@jsonschema.FormatChecker.cls_checks('keypair_name_292',
exception.InvalidName)
def _validate_keypair_name_292(keypair_name):
safe_chars = "@._- " + string.digits + string.ascii_letters
return _validate_keypair_name(keypair_name, safe_chars)
def _validate_keypair_name(keypair_name, safe_chars):
clean_value = "".join(x for x in keypair_name if x in safe_chars)
if clean_value != keypair_name:
reason = _("Only expected characters: [%s]") % safe_chars
raise exception.InvalidName(reason=reason)
return True
def _soft_validate_additional_properties(validator,
additional_properties_value,
instance,
schema):
"""This validator function is used for legacy v2 compatible mode in v2.1.
This will skip all the additional properties checking but keep check the
'patternProperties'. 'patternProperties' is used for metadata API.
If there are not any properties on the instance that are not specified in
the schema, this will return without any effect. If there are any such
extra properties, they will be handled as follows:
- if the validator passed to the method is not of type "object", this
method will return without any effect.
- if the 'additional_properties_value' parameter is True, this method will
return without any effect.
- if the schema has an additionalProperties value of True, the extra
properties on the instance will not be touched.
- if the schema has an additionalProperties value of False and there
aren't patternProperties specified, the extra properties will be stripped
from the instance.
- if the schema has an additionalProperties value of False and there
are patternProperties specified, the extra properties will not be
touched and raise validation error if pattern doesn't match.
"""
if (not validator.is_type(instance, "object") or
additional_properties_value):
return
properties = schema.get("properties", {})
patterns = "|".join(schema.get("patternProperties", {}))
extra_properties = set()
for prop in instance:
if prop not in properties:
if patterns:
if not re.search(patterns, prop):
extra_properties.add(prop)
else:
extra_properties.add(prop)
if not extra_properties:
return
if patterns:
error = "Additional properties are not allowed (%s %s unexpected)"
if len(extra_properties) == 1:
verb = "was"
else:
verb = "were"
yield jsonschema_exc.ValidationError(
error % (", ".join(repr(extra) for extra in extra_properties),
verb))
else:
for prop in extra_properties:
del instance[prop]
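# A minimal usage sketch (illustrative only, not part of nova): with
# additionalProperties set to False and no patternProperties, the relaxed
# validator silently strips unknown keys from the instance instead of raising.
def _example_soft_additional_properties():
    relaxed_cls = jsonschema.validators.extend(
        jsonschema.Draft4Validator,
        {'additionalProperties': _soft_validate_additional_properties})
    schema = {'type': 'object',
              'properties': {'name': {'type': 'string'}},
              'additionalProperties': False}
    payload = {'name': 'vm-1', 'bogus': 'dropped'}
    relaxed_cls(schema).validate(payload)  # no ValidationError is raised
    assert payload == {'name': 'vm-1'}     # the extra key was stripped in place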
class FormatChecker(jsonschema.FormatChecker):
"""A FormatChecker can output the message from cause exception
We need understandable validation errors messages for users. When a
custom checker has an exception, the FormatChecker will output a
readable message provided by the checker.
"""
def check(self, instance, format):
"""Check whether the instance conforms to the given format.
:argument instance: the instance to check
:type: any primitive type (str, number, bool)
:argument str format: the format that instance should conform to
:raises: :exc:`FormatError` if instance does not conform to format
"""
if format not in self.checkers:
return
# For safety reasons custom checkers can be registered with
# allowed exception types. Anything else will fall into the
# default formatter.
func, raises = self.checkers[format]
result, cause = None, None
try:
result = func(instance)
except raises as e:
cause = e
if not result:
msg = "%r is not a %r" % (instance, format)
raise jsonschema_exc.FormatError(msg, cause=cause)
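# Illustrative only: checkers registered above with an allowed exception type
# feed that exception back as the FormatError's cause. For instance,
# FormatChecker().check(123, 'name') raises a FormatError whose cause is the
# InvalidName raised by _validate_name for a non-string instance, while
# FormatChecker().check('[', 'regex') raises a plain FormatError because '['
# is not a compilable regular expression.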
class _SchemaValidator(object):
"""A validator class
This class is changed from Draft4Validator to validate minimum/maximum
    value of a string number (e.g. '10'). These changes can be removed when
we tighten up the API definition and the XML conversion.
Also FormatCheckers are added for checking data formats which would be
passed through nova api commonly.
"""
validator = None
validator_org = jsonschema.Draft4Validator
def __init__(self, schema, relax_additional_properties=False,
is_body=True):
self.is_body = is_body
validators = {
'minimum': self._validate_minimum,
'maximum': self._validate_maximum,
}
if relax_additional_properties:
validators[
'additionalProperties'] = _soft_validate_additional_properties
validator_cls = jsonschema.validators.extend(self.validator_org,
validators)
format_checker = FormatChecker()
self.validator = validator_cls(schema, format_checker=format_checker)
def validate(self, *args, **kwargs):
try:
self.validator.validate(*args, **kwargs)
except jsonschema.ValidationError as ex:
if isinstance(ex.cause, exception.InvalidName):
detail = ex.cause.format_message()
elif len(ex.path) > 0:
if self.is_body:
# NOTE: For whole OpenStack message consistency, this error
# message has been written as the similar format of
# WSME.
detail = _("Invalid input for field/attribute %(path)s. "
"Value: %(value)s. %(message)s") % {
'path': ex.path.pop(),
'value': ex.instance,
'message': ex.message}
else:
# NOTE: Use 'ex.path.popleft()' instead of 'ex.path.pop()',
# due to the structure of query parameters is a dict
# with key as name and value is list. So the first
# item in the 'ex.path' is the key, and second item
# is the index of list in the value. We need the
# key as the parameter name in the error message.
# So pop the first value out of 'ex.path'.
detail = _("Invalid input for query parameters %(path)s. "
"Value: %(value)s. %(message)s") % {
'path': ex.path.popleft(),
'value': ex.instance,
'message': ex.message}
else:
detail = ex.message
raise exception.ValidationError(detail=detail)
except TypeError as ex:
# NOTE: If passing non string value to patternProperties parameter,
# TypeError happens. Here is for catching the TypeError.
detail = str(ex)
raise exception.ValidationError(detail=detail)
def _number_from_str(self, instance):
try:
value = int(instance)
except (ValueError, TypeError):
try:
value = float(instance)
except (ValueError, TypeError):
return None
return value
def _validate_minimum(self, validator, minimum, instance, schema):
instance = self._number_from_str(instance)
if instance is None:
return
return self.validator_org.VALIDATORS['minimum'](validator, minimum,
instance, schema)
def _validate_maximum(self, validator, maximum, instance, schema):
instance = self._number_from_str(instance)
if instance is None:
return
return self.validator_org.VALIDATORS['maximum'](validator, maximum,
instance, schema)
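# Illustrative only: a string number is coerced before the minimum/maximum
# check, so _SchemaValidator({'type': 'string', 'minimum': 1, 'maximum': 10})
# .validate('5') passes, while validate('11') raises exception.ValidationError.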
|
679ea57cb930205fd254f2b5ea2f977d5f5fb6e0
|
9f84d91a8ae3df53b07fe3267992fba00a99ac9e
|
/test/nn/conv/cugraph/test_cugraph_gat_conv.py
|
307b9a780af682322226bf4050f7985f929c640c
|
[
"MIT"
] |
permissive
|
pyg-team/pytorch_geometric
|
ebea601eae228f3905465b5c2349d3fb3bb5cb26
|
a52af694b8ce6a80811e20966fe6d08a3e7511fe
|
refs/heads/master
| 2023-08-31T04:13:40.943308
| 2023-08-30T12:48:42
| 2023-08-30T12:48:42
| 106,024,057
| 6,775
| 1,563
|
MIT
| 2023-09-14T17:10:18
| 2017-10-06T16:03:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
test_cugraph_gat_conv.py
|
import pytest
import torch
from torch_geometric.nn import CuGraphGATConv, GATConv
from torch_geometric.testing import onlyCUDA, withPackage
@onlyCUDA
@withPackage('pylibcugraphops>=23.02')
@pytest.mark.parametrize('bias', [True, False])
@pytest.mark.parametrize('bipartite', [True, False])
@pytest.mark.parametrize('concat', [True, False])
@pytest.mark.parametrize('heads', [1, 2, 3])
@pytest.mark.parametrize('max_num_neighbors', [8, None])
def test_gat_conv_equality(bias, bipartite, concat, heads, max_num_neighbors):
in_channels, out_channels = (5, 2)
kwargs = dict(bias=bias, concat=concat)
size = (10, 8) if bipartite else (10, 10)
x = torch.rand(size[0], in_channels, device='cuda')
edge_index = torch.tensor([
[7, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 8, 9],
[0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7],
], device='cuda')
conv1 = GATConv(in_channels, out_channels, heads, add_self_loops=False,
**kwargs).cuda()
conv2 = CuGraphGATConv(in_channels, out_channels, heads, **kwargs).cuda()
with torch.no_grad():
conv2.lin.weight.data[:, :] = conv1.lin_src.weight.data
conv2.att.data[:heads * out_channels] = conv1.att_src.data.flatten()
conv2.att.data[heads * out_channels:] = conv1.att_dst.data.flatten()
if bipartite:
out1 = conv1((x, x[:size[1]]), edge_index)
else:
out1 = conv1(x, edge_index)
csc = CuGraphGATConv.to_csc(edge_index, size)
out2 = conv2(x, csc, max_num_neighbors=max_num_neighbors)
assert torch.allclose(out1, out2, atol=1e-3)
grad_output = torch.rand_like(out1)
out1.backward(grad_output)
out2.backward(grad_output)
assert torch.allclose(conv1.lin_src.weight.grad, conv2.lin.weight.grad,
atol=1e-3)
assert torch.allclose(conv1.att_src.grad.flatten(),
conv2.att.grad[:heads * out_channels], atol=1e-3)
assert torch.allclose(conv1.att_dst.grad.flatten(),
conv2.att.grad[heads * out_channels:], atol=1e-3)
if bias:
assert torch.allclose(conv1.bias.grad, conv2.bias.grad, atol=1e-3)
|
b05447e4e0a84f56ca92d36cd19f9eee08ec602c
|
989ab792e4675cb52df0c5864b51512ba8725ac2
|
/symfit/core/support.py
|
58a4376f4034d8e6333a47e80926ddd6df2c13a9
|
[
"MIT"
] |
permissive
|
tBuLi/symfit
|
6519edb124f5fed60722d0fe0ad718a54592d181
|
f484129e53fbc76fca1f61ce62bc6589083e8fc4
|
refs/heads/master
| 2023-05-24T21:38:04.299166
| 2023-05-08T13:10:58
| 2023-05-08T13:10:58
| 24,005,390
| 225
| 23
|
MIT
| 2023-03-14T10:35:23
| 2014-09-13T20:48:28
|
Python
|
UTF-8
|
Python
| false
| false
| 11,776
|
py
|
support.py
|
# SPDX-FileCopyrightText: 2014-2020 Martin Roelfs
#
# SPDX-License-Identifier: MIT
"""
This module contains support functions and convenience methods used
throughout symfit. Some are used predominantly internally, others are
designed for users.
"""
from __future__ import print_function
from collections import OrderedDict
import warnings
import re
import keyword
import numpy as np
from sympy.utilities.lambdify import lambdify
import sympy
from sympy.tensor import Idx
from sympy import symbols, MatrixExpr
from sympy.core.expr import Expr
from symfit.core.argument import Parameter, Variable
import symfit.core.printing # Overwrites some numpy printing
import inspect
from functools import wraps
class deprecated(object):
"""
Decorator to raise a DeprecationWarning.
"""
def __init__(self, replacement=None):
"""
:param replacement: The function which should now be used instead.
"""
self.replacement = replacement
def __call__(self, func):
@wraps(func)
def deprecated_func(*args, **kwargs):
            message = '`{}` has been deprecated.'.format(func.__name__)
            if self.replacement:
                message += ' Use `{}` instead.'.format(self.replacement)
            warnings.warn(DeprecationWarning(message))
return func(*args, **kwargs)
return deprecated_func
def seperate_symbols(func):
"""
    Separate the symbols in symbolic function func. Return them in alphabetical
    order.
    :param func: sympy symbolic function.
:return: (vars, params), a tuple of all variables and parameters, each
sorted in alphabetical order.
:raises TypeError: only symfit Variable and Parameter are allowed, not sympy
Symbols.
"""
params = []
vars = []
for symbol in func.free_symbols:
if not str(symbol).isidentifier():
continue # E.g. Indexed objects might print to A[i, j]
if isinstance(symbol, Parameter):
params.append(symbol)
elif isinstance(symbol, Idx):
# Idx objects are not seen as parameters or vars.
pass
elif isinstance(symbol, (MatrixExpr, Expr)):
vars.append(symbol)
else:
raise TypeError('model contains an unknown symbol type, {}'.format(type(symbol)))
for der in func.atoms(sympy.Derivative):
# Used by jacobians and hessians, where derivatives are treated as
# Variables. This way of writing it is purposefully discriminatory
# against derivatives wrt variables, since such derivatives should be
# performed explicitly in the case of jacs/hess, and are treated
# differently in the case of ODEModels.
if der.expr in vars and all(isinstance(s, Parameter) for s in der.variables):
vars.append(der)
params.sort(key=lambda symbol: symbol.name)
vars.sort(key=lambda symbol: symbol.name)
return vars, params
def sympy_to_py(func, args):
"""
Turn a symbolic expression into a Python lambda function,
    which has the names of the variables and parameters as its argument names.
:param func: sympy expression
:param args: variables and parameters in this model
:return: lambda function to be used for numerical evaluation of the model.
"""
# replace the derivatives with printable variables.
derivatives = {var: Variable(var.name) for var in args
if isinstance(var, sympy.Derivative)}
func = func.xreplace(derivatives)
args = [derivatives[var] if isinstance(var, sympy.Derivative) else var
for var in args]
lambdafunc = lambdify(args, func, dummify=False)
# Check if the names of the lambda function are what we expect
signature = inspect.signature(lambdafunc)
sig_parameters = OrderedDict(signature.parameters)
for arg, lambda_arg in zip(args, sig_parameters):
if arg.name != lambda_arg:
break
    else:  # Lambdifying successful!
return lambdafunc
# If we are here (very rare), then one of the lambda arg is still a Dummy.
# In this case we will manually handle the naming.
lambda_names = sig_parameters.keys()
arg_names = [arg.name for arg in args]
conversion = dict(zip(arg_names, lambda_names))
# Wrap the lambda such that arg names are translated into the correct dummy
# symbol names
@wraps(lambdafunc)
def wrapped_lambdafunc(*ordered_args, **kwargs):
converted_kwargs = {conversion[k]: v for k, v in kwargs.items()}
return lambdafunc(*ordered_args, **converted_kwargs)
    # Update the signature of wrapped_lambdafunc to match our args
new_sig_parameters = OrderedDict()
for arg_name, dummy_name in conversion.items():
if arg_name == dummy_name: # Already has the correct name
new_sig_parameters[arg_name] = sig_parameters[arg_name]
else: # Change the dummy inspect.Parameter to the correct name
param = sig_parameters[dummy_name]
param = param.replace(name=arg_name)
new_sig_parameters[arg_name] = param
wrapped_lambdafunc.__signature__ = signature.replace(
parameters=new_sig_parameters.values()
)
return wrapped_lambdafunc
def sympy_to_scipy(func, vars, params):
"""
    Convert a symbolic expression to one that scipy digs. Not used by ``symfit`` any more.
:param func: sympy expression
:param vars: variables
:param params: parameters
:return: Scipy-style function to be used for numerical evaluation of the model.
"""
    lambda_func = sympy_to_py(func, list(vars) + list(params))
def f(x, p):
"""
Scipy style function.
:param x: list of arrays, NxM
:param p: tuple of parameter values.
"""
x = np.atleast_2d(x)
y = [x[i] for i in range(len(x))] if len(x[0]) else []
try:
ans = lambda_func(*(y + list(p)))
except TypeError:
# Possibly this is a constant function in which case it only has Parameters.
ans = lambda_func(*list(p))# * np.ones(x_shape)
return ans
return f
def variables(names, **kwargs):
"""
Convenience function for the creation of multiple variables. For more
control, consider using ``symbols(names, cls=Variable, **kwargs)`` directly.
:param names: string of variable names.
Example: x, y = variables('x, y')
:param kwargs: kwargs to be passed onto :func:`sympy.core.symbol.symbols`
:return: iterable of :class:`symfit.core.argument.Variable` objects
"""
return symbols(names, cls=Variable, seq=True, **kwargs)
def parameters(names, **kwargs):
"""
Convenience function for the creation of multiple parameters. For more
control, consider using ``symbols(names, cls=Parameter, **kwargs)`` directly.
The `Parameter` attributes `value`, `min`, `max` and `fixed` can also be provided
directly. If given as a single value, the same value will be set for all
`Parameter`'s. When a sequence, it must be of the same length as the number of
parameters created.
Example::
x1, x2 = parameters('x1, x2', value=[2.0, 1.3], min=0.0)
:param names: string of parameter names.
Example: a, b = parameters('a, b')
:param kwargs: kwargs to be passed onto :func:`sympy.core.symbol.symbols`.
`value`, `min` and `max` will be handled separately if they are sequences.
:return: iterable of :class:`symfit.core.argument.Parameter` objects
"""
sequence_fields = ['value', 'min', 'max', 'fixed']
sequences = {}
for attr in sequence_fields:
try:
iter(kwargs[attr])
except (TypeError, KeyError):
# Not iterable or not provided
pass
else:
sequences[attr] = kwargs.pop(attr)
if 'min' in sequences and 'max' in sequences:
for min, max in zip(sequences['min'], sequences['max']):
if min > max:
raise ValueError('The value of `min` should be less than or'
' equal to the value of `max`.')
params = symbols(names, cls=Parameter, seq=True, **kwargs)
for key, values in sequences.items():
try:
assert len(values) == len(params)
except AssertionError:
raise ValueError(
'`len` of keyword-argument `{}` does not match the number of '
                '`Parameter`s created.'.format(key)
)
except TypeError:
# Iterator do not have a `len` but are allowed.
pass
finally:
for param, value in zip(params, values):
setattr(param, key, value)
return params
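# Illustrative only, mirroring the docstring example above:
#   x1, x2 = parameters('x1, x2', value=[2.0, 1.3], min=0.0)
# gives x1.value == 2.0, x2.value == 1.3 and a shared bound x1.min == x2.min == 0.0.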
class cached_property(property):
"""
    A property which caches the output of the first ever call and always returns
that value from then on, unless delete is called on the attribute.
This is typically used in converting `sympy` code into `scipy` compatible
code, which is computationally a very expensive step we would like to
perform only once.
Does not allow setting of the attribute.
"""
base_str = '_cached'
def __init__(self, *args, **kwargs):
super(cached_property, self).__init__(*args, **kwargs)
self.cache_attr = '{}_{}'.format(self.base_str, self.fget.__name__)
def __get__(self, obj, objtype=None):
"""
In case of a first call, this will call the decorated function and
        return its output. On every subsequent call, the same output will be
returned.
:param obj: the parent object this property is attached to.
:param objtype:
:return: Output of the first call to the decorated function.
"""
try:
return getattr(obj, self.cache_attr)
except AttributeError:
# Call the wrapped function with the obj instance as argument
setattr(obj, self.cache_attr, self.fget(obj))
return getattr(obj, self.cache_attr)
def __delete__(self, obj):
"""
Calling delete on the attribute will delete the cache.
:param obj: parent object.
"""
try:
delattr(obj, self.cache_attr)
except AttributeError:
pass
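# A brief illustrative sketch (names are made up, not part of symfit's API):
# the first access runs the decorated method, later accesses reuse the cached
# value until `del` clears it via __delete__.
def _cached_property_example():
    class Demo(object):
        calls = 0
        @cached_property
        def expensive(self):
            Demo.calls += 1
            return 42
    demo = Demo()
    assert (demo.expensive, demo.expensive) == (42, 42)
    assert Demo.calls == 1   # computed only once
    del demo.expensive       # clears the cache
    assert demo.expensive == 42 and Demo.calls == 2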
def jacobian(expr, symbols):
"""
Derive a symbolic expr w.r.t. each symbol in symbols. This returns a symbolic jacobian vector.
:param expr: A sympy Expr.
:param symbols: The symbols w.r.t. which to derive.
"""
jac = []
for symbol in symbols:
# Differentiate to every param
f = sympy.diff(expr, symbol)
jac.append(f)
return jac
def key2str(target):
"""
In ``symfit`` there are many dicts with symbol: value pairs.
These can not be used immediately as \*\*kwargs, even though this would make
a lot of sense from the context.
This function wraps such dict to make them usable as \*\*kwargs immediately.
    :param target: `Mapping` to be made safe
:return: `Mapping` of str(symbol): value pairs.
"""
return target.__class__((str(symbol), value) for symbol, value in target.items())
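# Illustrative only (x and a being symfit symbols): key2str({x: 3.0, a: 2.0})
# returns {'x': 3.0, 'a': 2.0}, ready to be splatted into a call as **kwargs.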
def D(*args, **kwargs):
# ToDo: Investigate sympy's inheritance properly to see if I can give a
    # .name attribute to Derivative objects or subclasses.
return sympy.Derivative(*args, **kwargs)
def name(self):
"""
    Safe name which can be used for alphabetic sorting and can be turned
into a kwarg.
"""
base_str = 'd{}{}_'.format(self.derivative_count if
self.derivative_count > 1 else '', self.expr)
for var, count in self.variable_count:
base_str += 'd{}{}'.format(var, count if count > 1 else '')
return base_str
sympy.Derivative.name = property(name)
|
53a9397649c4f88e2f900fd1eba7f8e98e9a3529
|
f7c975bef40f90a1a291b97466e1c81dfe69bae6
|
/notifiers/notifier.py
|
6c10c3170ff714fbda273503e4e11da451a3949e
|
[
"BSD-3-Clause"
] |
permissive
|
duo-labs/secret-bridge
|
24f2ea9e8a3ad1b6befeae0fd183e772ea5ff645
|
c4f8d6e8b1cceae6edfab6f8827f375e4853257c
|
refs/heads/master
| 2023-08-15T03:40:15.622855
| 2023-02-17T17:25:35
| 2023-02-17T17:25:35
| 201,492,192
| 181
| 44
|
BSD-3-Clause
| 2023-07-25T21:14:10
| 2019-08-09T15:18:28
|
Python
|
UTF-8
|
Python
| false
| false
| 215
|
py
|
notifier.py
|
from abc import ABC, abstractmethod
class Notifier(ABC):
def __init__(self):
pass
@abstractmethod
def process(self, findings, detector_name):
"""Process a found secret."""
pass
|
076a4ed30977eae1b82d15a4e134202e7bc135b8
|
cf482a354ef3287f8d80163d12dfd6207220007a
|
/python/multicorn/imapfdw.py
|
7a267ac4801a5f977c2662289d639515760fc571
|
[
"PostgreSQL"
] |
permissive
|
Segfault-Inc/Multicorn
|
f3996a9a01782dafc3ec5dbddf9c03fa40ab9a39
|
3b4da7f9f4ea339e71c6bb7df22d441c980b5aef
|
refs/heads/master
| 2023-07-31T04:02:13.289966
| 2020-08-18T13:56:06
| 2022-04-05T06:33:40
| 1,851,271
| 167
| 42
|
PostgreSQL
| 2023-09-04T18:50:12
| 2011-06-05T18:37:51
|
Python
|
UTF-8
|
Python
| false
| false
| 11,082
|
py
|
imapfdw.py
|
"""
Purpose
-------
This fdw can be used to access mails from an IMAP mailbox.
Column names are mapped to IMAP headers, and two special columns may contain the
mail payload and its flags.
.. api_compat:: :read:
Dependencies
-------------
imaplib
Options
--------
``host`` (required)
The IMAP host to connect to.
``port`` (required)
The IMAP host port to connect to.
``login`` (required)
The login to connect with.
``password`` (required)
The password to connect with.
The login and password options should be set as a user mapping options, so as
not to be stored in plaintext. See `the create user mapping documentation`_
.. _the create user mapping documentation: http://www.postgresql.org/docs/9.1/static/sql-createusermapping.html
``payload_column``
The name of the column which will store the payload.
``flags_column``
The name of the column which will store the IMAP flags, as an array of
strings.
``ssl``
    Whether to use SSL or not.
``imap_server_charset``
The name of the charset used for IMAP search commands. Defaults to UTF8. For
the cyrus IMAP server, it should be set to "utf-8".
``internaldate_column``
The column to use as the INTERNALDATE imap header.
Server side filtering
---------------------
The imap fdw tries its best to convert postgresql quals into imap filters.
The following quals are pushed to the server:
- equal, not equal, like, not like comparison
- = ANY, = NOT ANY
These conditions are matched against the headers, or the body itself.
The imap FDW will fetch only what is needed by the query: you should thus avoid
requesting the payload_column if you don't need it.
"""
from . import ForeignDataWrapper, ANY, ALL
from .utils import log_to_postgres, ERROR, WARNING
from imaplib import IMAP4
import re
from multicorn.compat import basestring_
from email.header import decode_header
from imapclient import IMAPClient
from itertools import islice
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from functools import reduce
STANDARD_FLAGS = {
'seen': 'Seen',
'flagged': 'Flagged',
'delete': 'Deleted',
'draft': 'Draft',
'recent': 'Recent'
}
SEARCH_HEADERS = ['BCC', 'CC', 'FROM', 'TO']
def compact_fetch(messages):
"""Compact result in ranges.
For example, [1, 2, 3, 4, 10, 11, 12, 14, 17, 18, 19, 21, 92]
can be compacted in ['1:4', '10:12', '14', '17:19', '21', '92']
"""
first_i = messages[0]
for (i, inext) in zip_longest(messages, islice(messages, 1, None)):
if inext == i + 1:
continue
elif first_i != i:
yield '%s:%s' % (first_i, i)
first_i = inext
else:
yield "%s" % i
first_i = inext
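# Illustrative check of the compaction described in the docstring:
#   list(compact_fetch([1, 2, 3, 4, 10, 11, 12, 14, 17, 18, 19, 21, 92]))
#   == ['1:4', '10:12', '14', '17:19', '21', '92']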
class NoMatchPossible(Exception):
"""An exception raised when the conditions can NOT be met by any message,
ever."""
def make_or(values):
"""Create an imap OR filter based on a list of conditions to be or'ed"""
values = [x for x in values if x not in (None, '()')]
if values:
if len(values) > 1:
return reduce(lambda x, y: '(OR %s %s)' % (x, y), values)
else:
return values[0]
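# Illustrative only: make_or folds conditions into nested IMAP ORs and drops
# empty placeholders, e.g.
#   make_or(['(FROM "a")', '(TO "b")', '()']) == '(OR (FROM "a") (TO "b"))'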
class ImapFdw(ForeignDataWrapper):
"""An imap foreign data wrapper
"""
def __init__(self, options, columns):
super(ImapFdw, self).__init__(options, columns)
self._imap_agent = None
self.host = options.get('host', None)
if self.host is None:
log_to_postgres('You MUST set the imap host',
ERROR)
self.port = options.get('port', None)
self.ssl = options.get('ssl', False)
self.login = options.get('login', None)
self.password = options.get('password', None)
self.folder = options.get('folder', 'INBOX')
self.imap_server_charset = options.get('imap_server_charset', 'UTF8')
self.columns = columns
self.payload_column = options.get('payload_column', None)
self.flags_column = options.get('flags_column', None)
self.internaldate_column = options.get('internaldate_column', None)
def get_rel_size(self, quals, columns):
"""Inform the planner that it can be EXTREMELY costly to use the
payload column, and that a query on Message-ID will return
only one row."""
width = len(columns) * 100
nb_rows = 1000000
if self.payload_column in columns:
width += 100000000000
nb_rows = nb_rows / (10 ** len(quals))
for qual in quals:
if qual.field_name.lower() == 'in-reply-to' and\
qual.operator == '=':
nb_rows = 10
if (qual.field_name.lower() == 'message-id' and
qual.operator == '='):
nb_rows = 1
break
return (nb_rows, width)
def _create_agent(self):
self._imap_agent = IMAPClient(self.host, self.port, ssl=self.ssl)
if self.login:
self._imap_agent.login(self.login, self.password)
self._imap_agent.select_folder(self.folder)
@property
def imap_agent(self):
if self._imap_agent is None:
self._create_agent()
try:
self._imap_agent.select_folder(self.folder)
except IMAP4.abort:
self._create_agent()
return self._imap_agent
def get_path_keys(self):
"""Helps the planner by supplying a list of list of access keys,
as well as a row estimate for each one."""
return [(('Message-ID',), 1), (('From',), 100), (('To',), 100),
(('In-Reply-To',), 10)]
def _make_condition(self, key, operator, value):
if operator not in ('~~', '!~~', '=', '<>', '@>', '&&', '~~*', '!~~*'):
# Do not manage special operators
return ''
if operator in ('~~', '!~~', '~~*', '!~~*') and\
isinstance(value, basestring_):
# 'Normalize' the sql like wildcards
if value.startswith(('%', '_')):
value = value[1:]
if value.endswith(('%', '_')):
value = value[:-1]
if re.match(r'.*[^\\][_%]', value):
return ''
value = value.replace('\\%', '%').replace('\\_', '_')
prefix = ''
if operator in ('!~~', '<>', '!~~*'):
if key == self.flags_column:
prefix = 'UN'
else:
prefix = 'NOT '
if isinstance(value, basestring_):
if value.lower() in STANDARD_FLAGS:
prefix = ''
value = value.upper()
if key == self.flags_column:
if operator == '@>':
# Contains on flags
return ' '.join(['%s%s' % (prefix,
(STANDARD_FLAGS.get(atom.lower(), '%s %s'
% ('KEYWORD', atom)))) for atom in value])
elif operator == '&&':
# Overlaps on flags => Or
values = ['(%s%s)' %
(prefix, (STANDARD_FLAGS.get(atom.lower(), '%s %s' %
('KEYWORD', atom)))) for atom in value]
return make_or(values)
else:
value = '\\\\%s' % value
elif key == self.payload_column:
value = 'TEXT "%s"' % value
elif key in SEARCH_HEADERS:
value = '%s "%s"' % (key, value)
else:
# Special case for Message-ID and In-Reply-To:
# zero-length strings are forbidden so dont bother
# searching them
if not value:
raise NoMatchPossible()
prefix = 'HEADER '
value = '%s "%s"' % (key, value)
return '%s%s' % (prefix, value)
def extract_conditions(self, quals):
"""Build an imap search criteria string from a list of quals"""
conditions = []
for qual in quals:
# Its a list, so we must translate ANY to OR, and ALL to AND
if qual.list_any_or_all == ANY:
values = [
'(%s)' % self._make_condition(qual.field_name,
qual.operator[0], value)
for value in qual.value]
conditions.append(make_or(values))
elif qual.list_any_or_all == ALL:
conditions.extend([
self._make_condition(qual.field_name, qual.operator[0],
value)
for value in qual.value])
else:
# its not a list, so everything is fine
conditions.append(self._make_condition(
qual.field_name,
qual.operator, qual.value))
conditions = [x for x in conditions if x not in (None, '()')]
return conditions
def execute(self, quals, columns):
# The header dictionary maps columns to their imap search string
col_to_imap = {}
headers = []
for column in list(columns):
if column == self.payload_column:
col_to_imap[column] = 'BODY[TEXT]'
elif column == self.flags_column:
col_to_imap[column] = 'FLAGS'
elif column == self.internaldate_column:
col_to_imap[column] = 'INTERNALDATE'
else:
col_to_imap[column] = ('BODY[HEADER.FIELDS (%s)]' %
column.upper())
headers.append(column)
try:
conditions = self.extract_conditions(quals) or ['ALL']
except NoMatchPossible:
matching_mails = []
else:
matching_mails = self.imap_agent.search(
charset=self.imap_server_charset,
criteria=conditions)
if matching_mails:
data = self.imap_agent.fetch(list(compact_fetch(matching_mails)),
list(col_to_imap.values()))
item = {}
for msg in data.values():
for column, key in col_to_imap.items():
item[column] = msg[key]
if column in headers:
item[column] = item[column].split(':', 1)[-1].strip()
values = decode_header(item[column])
for decoded_header, charset in values:
                            # Values are of the form "Header: value"
if charset:
try:
item[column] = decoded_header.decode(
charset)
except LookupError:
log_to_postgres('Unknown encoding: %s' %
charset, WARNING)
else:
item[column] = decoded_header
yield item
|
e4a939ef2861568ede1ab113be296febcf92c67b
|
f9f074c44b67a11d4630b5e1cc15e016e8d73cc8
|
/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_pd_deploy_status/migrations/0003_remove_deploystatus_performance.py
|
05f91a474583fca3500af70d47efb8b700f3f5ba
|
[
"MIT"
] |
permissive
|
Azure-Samples/azure-intelligent-edge-patterns
|
361694680c7e48d3761c5416175788355b684dcd
|
1d2f42cbf9f21157c1e1abf044b26160dfed5b16
|
refs/heads/master
| 2023-05-26T13:15:47.085088
| 2023-02-28T17:25:53
| 2023-02-28T17:25:53
| 186,706,933
| 193
| 164
|
MIT
| 2023-02-28T17:25:55
| 2019-05-14T22:02:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 289
|
py
|
0003_remove_deploystatus_performance.py
|
# Generated by Django 3.0.8 on 2020-08-28 07:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("azure_pd_deploy_status", "0002_auto_20200828_0616")]
operations = [migrations.RemoveField(model_name="deploystatus", name="performance")]
|
f792ded71cdd879f85ba047710ad2913c9074511
|
d6bcc2a87c2e419528c0edc98ebd3d3717a16716
|
/test/lib/ufe/testRotatePivot.py
|
e0ba3df8275e8970b8ec286bd3e6f8b63403b717
|
[
"DOC"
] |
permissive
|
Autodesk/maya-usd
|
ac9e03f39132c6b221032f21dc98805b4aa52d31
|
dc1c13a3f8012b2a99a45e46fb30250fd4b82487
|
refs/heads/dev
| 2023-09-05T07:39:58.640296
| 2023-09-01T19:56:30
| 2023-09-01T19:56:30
| 198,889,624
| 692
| 208
| null | 2023-09-14T20:49:17
| 2019-07-25T19:25:28
|
Mathematica
|
UTF-8
|
Python
| false
| false
| 10,199
|
py
|
testRotatePivot.py
|
#!/usr/bin/env python
#
# Copyright 2019 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mayaUsd.lib
import fixturesUtils
import mayaUtils
import ufeUtils
from maya import cmds
from maya import standalone
from maya.api import OpenMaya as om
from pxr import UsdGeom, Vt, Gf
import ufe
from math import degrees
from math import radians
from math import sin
import os
import unittest
def v3dToMPoint(v):
return om.MPoint(v.x(), v.y(), v.z())
def assertMPointAlmostEqual(testCase, a, b, places=7):
    testCase.assertAlmostEqual(a.x, b.x, places)
    testCase.assertAlmostEqual(a.y, b.y, places)
    testCase.assertAlmostEqual(a.z, b.z, places)
    testCase.assertAlmostEqual(a.w, b.w, places)
# Index into MMatrix linearly as a 16-element vector, starting at row 0.
def ndx(i, j):
return i*4+j
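# Illustrative only: ndx(3, 0) == 12, i.e. the x component of the translation
# row when a 4x4 MMatrix is indexed as a flat 16-element sequence.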
class RotatePivotTestCase(unittest.TestCase):
'''Verify the Transform3d UFE rotate pivot interface.
UFE Feature : Transform3d
Maya Feature : rotate pivot
Action : Set the rotate pivot.
Applied On Selection :
- No selection - Given node as param
- Single Selection : Not tested.
- Multiple Selection [Mixed, Non-Maya] : Not tested.
Undo/Redo Test : UFE undoable command only.
Expect Results To Test :
- Maya Dag object world space position.
- USD object world space position.
Edge Cases :
- None.
'''
pluginsLoaded = False
@classmethod
def setUpClass(cls):
fixturesUtils.readOnlySetUpClass(__file__, loadPlugin=False)
if not cls.pluginsLoaded:
cls.pluginsLoaded = mayaUtils.isMayaUsdPluginLoaded()
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def setUp(self):
''' Called initially to set up the maya test environment '''
# Load plugins
self.assertTrue(self.pluginsLoaded)
# Open twoSpheres.ma scene in testSamples
mayaUtils.openTwoSpheresScene()
def checkPos(self, m, p):
self.assertAlmostEqual(m[ndx(3,0)], p[0])
self.assertAlmostEqual(m[ndx(3,1)], p[1])
self.assertAlmostEqual(m[ndx(3,2)], p[2])
def testRotatePivot(self):
# mayaSphere is at (10, 0, 10) in local space, and since it has no
# parent, in world space as well.
spherePath = om.MSelectionList().add('mayaSphere').getDagPath(0)
sphereFn = om.MFnTransform(spherePath)
rotZ = radians(45)
rot = om.MEulerRotation(0, 0, rotZ)
# Pivot around x=0.
pivot = om.MPoint(-10, 0, 0)
sphereFn.setRotatePivot(pivot, om.MSpace.kTransform, False)
sphereFn.setRotation(rot, om.MSpace.kTransform)
# MFnTransform and MTransformationMatrix always return the individual
# components of the matrix, including translation, which is unaffected
# by pivot changes, as expected. The fully-multiplied result is in the
# computed MMatrix.
xyWorldValue = sin(rotZ) * 10
sphereMatrix = sphereFn.transformation().asMatrix()
self.checkPos(sphereMatrix, [xyWorldValue, xyWorldValue, 10])
# Do the same with the USD object, through UFE.
# USD sphere is at (10, 0, 0) in local space, and since its parents
# have an identity transform, in world space as well.
usdSpherePath = ufe.PathString.path(
'|usdSphereParent|usdSphereParentShape,/sphereXform/sphere')
usdSphereItem = ufe.Hierarchy.createItem(usdSpherePath)
t3d = ufe.Transform3d.transform3d(usdSphereItem)
t3d.rotatePivot(pivot[0], pivot[1], pivot[2])
usdPivot = t3d.rotatePivot()
assertMPointAlmostEqual(self, v3dToMPoint(usdPivot), pivot)
t3d.rotate(degrees(rot[0]), degrees(rot[1]), degrees(rot[2]))
sphereMatrix = om.MMatrix(t3d.inclusiveMatrix().matrix)
self.checkPos(sphereMatrix, [xyWorldValue, xyWorldValue, 0])
t3d.rotatePivot(0, 0, 0)
usdPivot = t3d.rotatePivot()
assertMPointAlmostEqual(self, v3dToMPoint(usdPivot), om.MPoint(0, 0, 0))
sphereMatrix = om.MMatrix(t3d.inclusiveMatrix().matrix)
self.checkPos(sphereMatrix, [10, 0, 0])
# Use a UFE undoable command to set the pivot.
rotatePivotCmd = t3d.rotatePivotCmd()
rotatePivotCmd.set(pivot[0], pivot[1], pivot[2])
usdPivot = t3d.rotatePivot()
assertMPointAlmostEqual(self, v3dToMPoint(usdPivot), pivot)
sphereMatrix = om.MMatrix(t3d.inclusiveMatrix().matrix)
self.checkPos(sphereMatrix, [xyWorldValue, xyWorldValue, 0])
rotatePivotCmd.undo()
usdPivot = t3d.rotatePivot()
assertMPointAlmostEqual(self, v3dToMPoint(usdPivot), om.MPoint(0, 0, 0))
sphereMatrix = om.MMatrix(t3d.inclusiveMatrix().matrix)
self.checkPos(sphereMatrix, [10, 0, 0])
# redo() cannot be tested, as it currently is intentionally not
# implemented, because the Maya move command handles undo by directly
# calling the translate() method. This is fragile,
# implementation-specific, and should be changed. PPT, 3-Sep-2020.
@unittest.skipUnless(mayaUtils.mayaMajorVersion() >= 2022, 'Requires Maya fixes only available in Maya 2022 or greater.')
def testRotatePivotCmd(self):
rotZ = radians(45)
rot = om.MEulerRotation(0, 0, rotZ)
# Pivot around x=0.
pivot = om.MPoint(-10, 0, 0)
pivotTranslate = om.MPoint(0, 0, 0)
xyWorldValue = sin(rotZ) * 10
rotatedPosition = [xyWorldValue, xyWorldValue, 0]
# USD sphere is at (10, 0, 0) in local space, and since its parents
# have an identity transform, in world space as well.
spherePath = ufe.PathString.path(
'|usdSphereParent|usdSphereParentShape,/sphereXform')
sphereItem = ufe.Hierarchy.createItem(spherePath)
ufe.GlobalSelection.get().append(sphereItem)
# Create a Transform3d interface to read from USD.
t3d = ufe.Transform3d.transform3d(sphereItem)
# Start with a non-zero initial rotate pivot. This is required to test
# MAYA-105345, otherwise a zero initial rotate pivot produces the
# correct result through an unintended code path.
t3d.rotatePivot(2, 0, 0)
usdPivot = t3d.rotatePivot()
assertMPointAlmostEqual(self, v3dToMPoint(usdPivot), om.MPoint(2, 0, 0))
t3d.rotatePivot(0, 0, 0)
# Start with a non-zero initial rotation. This is required to test
# MAYA-112175, otherwise a zero initial rotation means rotate pivot
# translation will be empty and we get the correct result by accident.
if (mayaUtils.mayaMajorVersion() >= 2023):
cmds.rotate(0, 0, 90)
print(type(pivot))
pivot = om.MPoint(0, 10, 0)
print(type(pivot))
pivotTranslate = om.MPoint(-10, -10, 0)
rotatedPosition = [xyWorldValue, -xyWorldValue, 0]
cmds.move(-10, 0, 0, relative=True, ufeRotatePivot=True)
usdPivot = t3d.rotatePivot()
assertMPointAlmostEqual(self, v3dToMPoint(usdPivot), pivot)
usdRotatePivotTranslation = t3d.rotatePivotTranslation()
assertMPointAlmostEqual(self, v3dToMPoint(usdRotatePivotTranslation), pivotTranslate)
cmds.undo()
usdPivot = t3d.rotatePivot()
assertMPointAlmostEqual(self, v3dToMPoint(usdPivot), om.MPoint(0, 0, 0))
usdRotatePivotTranslation = t3d.rotatePivotTranslation()
assertMPointAlmostEqual(self, v3dToMPoint(usdRotatePivotTranslation), om.MPoint(0, 0, 0))
cmds.redo()
usdPivot = t3d.rotatePivot()
assertMPointAlmostEqual(self, v3dToMPoint(usdPivot), pivot)
cmds.rotate(degrees(rot[0]), degrees(rot[1]), degrees(rot[2]))
sphereMatrix = om.MMatrix(t3d.inclusiveMatrix().matrix)
self.checkPos(sphereMatrix, rotatedPosition)
if (mayaUtils.mayaMajorVersion() >= 2023):
cmds.undo()
cmds.move(10, 10, 0, absolute=True)
sphereMatrix = om.MMatrix(t3d.inclusiveMatrix().matrix)
self.checkPos(sphereMatrix, [10, 10, 0])
cmds.move(10, 10, 0, absolute=True, rotatePivotRelative=True)
sphereMatrix = om.MMatrix(t3d.inclusiveMatrix().matrix)
self.checkPos(sphereMatrix, [20, 10, 0])
def testRotatePivotMatrixOp(self):
'''Setting the rotate pivot on a prim with a matrix op.'''
# Create a scene with an xform that has a matrix op.
import mayaUsd_createStageWithNewLayer
mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
proxyShapePathStr = '|stage1|stageShape1'
stage = mayaUsd.lib.GetPrim(proxyShapePathStr).GetStage()
xform = stage.DefinePrim('/Xform1', 'Xform')
xformable = UsdGeom.Xformable(xform)
transformOp = xformable.AddTransformOp()
transformOp.Set(Gf.Matrix4d(1.0))
self.assertEqual(xformable.GetXformOpOrderAttr().Get(), Vt.TokenArray([
"xformOp:transform"]))
xformItem = ufeUtils.createItem(proxyShapePathStr + ',/Xform1')
sn = ufe.GlobalSelection.get()
sn.clear()
sn.append(xformItem)
# Set the rotate and scale pivot. Since the matrix op does not support
# these, this will create a Maya fallback transform stack after the
# matrix op.
cmds.move(3, 2, 1, r=True, urp=True, usp=True)
# Read back the rotate pivot using the Transform3d interface.
t3d = ufe.Transform3d.transform3d(xformItem)
self.assertEqual(t3d.rotatePivot().vector, [3, 2, 1])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
20f38fd80d1be3b0f628849d069b5049c0d2abea
|
d4a88b3b102e20e727cae8fbd4167dcb4b57d1ec
|
/pymunk/examples/using_sprites.py
|
6e7364a329a53753cc5661d37b36d037fab87b5b
|
[
"MIT"
] |
permissive
|
viblo/pymunk
|
ca64888e45706db431788368ff8464edf2912d5f
|
20ac14f665fb38b4ef1bef5acea36a3d612dd0d5
|
refs/heads/master
| 2023-08-27T16:37:14.740653
| 2023-08-16T19:26:16
| 2023-08-16T19:26:16
| 13,273,472
| 855
| 255
|
MIT
| 2023-01-13T10:13:47
| 2013-10-02T14:36:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,794
|
py
|
using_sprites.py
|
"""Very basic example of using a sprite image to draw a shape more similar
how you would do it in a real game instead of the simple line drawings used
by the other examples.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import math
import random
from typing import List
import os.path
import pygame
import pymunk
from pymunk import Vec2d
def flipy(y):
"""Small hack to convert chipmunk physics to pygame coordinates"""
return -y + 600
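# Illustrative only: flipy(0) == 600 and flipy(600) == 0, mapping pymunk's
# y-up world into pygame's y-down screen coordinates for a 600px-high window.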
def main():
pygame.init()
screen = pygame.display.set_mode((600, 600))
clock = pygame.time.Clock()
running = True
### Physics stuff
space = pymunk.Space()
space.gravity = Vec2d(0.0, -900.0)
## logo
logo_img = pygame.image.load(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "pymunk_logo_googlecode.png"
)
)
logos: List[pymunk.Shape] = []
### Static line
static_lines = [
pymunk.Segment(space.static_body, (11.0, 280.0), (407.0, 246.0), 0.0),
pymunk.Segment(space.static_body, (407.0, 246.0), (407.0, 343.0), 0.0),
]
for l in static_lines:
l.friction = 0.5
space.add(*static_lines)
ticks_to_next_spawn = 10
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
pygame.image.save(screen, "using_sprites.png")
ticks_to_next_spawn -= 1
if ticks_to_next_spawn <= 0:
ticks_to_next_spawn = 100
x = random.randint(20, 400)
y = 500
angle = random.random() * math.pi
vs = [(-23, 26), (23, 26), (0, -26)]
mass = 10
moment = pymunk.moment_for_poly(mass, vs)
body = pymunk.Body(mass, moment)
shape = pymunk.Poly(body, vs)
shape.friction = 0.5
body.position = x, y
body.angle = angle
space.add(body, shape)
logos.append(shape)
### Update physics
dt = 1.0 / 60.0
for x in range(1):
space.step(dt)
### Draw stuff
screen.fill(pygame.Color("black"))
for logo_shape in logos:
# image draw
p = logo_shape.body.position
p = Vec2d(p.x, flipy(p.y))
# we need to rotate 180 degrees because of the y coordinate flip
angle_degrees = math.degrees(logo_shape.body.angle) + 180
rotated_logo_img = pygame.transform.rotate(logo_img, angle_degrees)
offset = Vec2d(*rotated_logo_img.get_size()) / 2
p = p - offset
screen.blit(rotated_logo_img, (round(p.x), round(p.y)))
# debug draw
ps = [
p.rotated(logo_shape.body.angle) + logo_shape.body.position
for p in logo_shape.get_vertices()
]
ps = [(round(p.x), round(flipy(p.y))) for p in ps]
ps += [ps[0]]
pygame.draw.lines(screen, pygame.Color("red"), False, ps, 1)
for line in static_lines:
body = line.body
pv1 = body.position + line.a.rotated(body.angle)
pv2 = body.position + line.b.rotated(body.angle)
p1 = round(pv1.x), round(flipy(pv1.y))
p2 = round(pv2.x), round(flipy(pv2.y))
pygame.draw.lines(screen, pygame.Color("lightgray"), False, [p1, p2], 2)
### Flip screen
pygame.display.flip()
clock.tick(50)
pygame.display.set_caption("fps: " + str(clock.get_fps()))
if __name__ == "__main__":
main()
|
b592f69985a923a502c11dc905bab7864818a6e2
|
c618bbf2719431999b1007461df0865bab60c883
|
/dali/test/python/reader/test_webdataset_requirements.py
|
17462ee1eb9892ade28dba898ef90404ecd1ad0a
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DALI
|
3d0d061135d19e092647e6522046b2ff23d4ef03
|
92ebbe5c20e460050abd985acb590e6c27199517
|
refs/heads/main
| 2023-09-04T01:53:59.033608
| 2023-09-01T13:45:03
| 2023-09-01T13:45:03
| 135,768,037
| 4,851
| 648
|
Apache-2.0
| 2023-09-12T18:00:22
| 2018-06-01T22:18:01
|
C++
|
UTF-8
|
Python
| false
| false
| 16,230
|
py
|
test_webdataset_requirements.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from glob import glob
import math
import nvidia.dali as dali
from test_utils import compare_pipelines, get_dali_extra_path
from nose_utils import assert_raises
from nose.tools import assert_equal
from webdataset_base import (generate_temp_extract, generate_temp_index_file,
webdataset_raw_pipeline, file_reader_pipeline)
from webdataset_base import test_batch_size # noqa:F401, this is a parameter used in tests
def test_return_empty():
num_samples = 1000
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/missing.tar")
index_file = generate_temp_index_file(tar_file_path)
extract_dir = generate_temp_extract(tar_file_path)
equivalent_files = glob(extract_dir.name + "/*")
equivalent_files = sorted(equivalent_files,
key=(lambda s: int(s[s.rfind("/") + 1 : s.rfind(".")]))) # noqa: 203
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "txt"],
batch_size=test_batch_size,
device_id=0,
num_threads=1,
missing_component_behavior="empty",
),
file_reader_pipeline(equivalent_files, ["jpg", []], batch_size=test_batch_size, device_id=0,
num_threads=1),
test_batch_size,
math.ceil(num_samples / test_batch_size),
)
def test_skip_sample():
num_samples = 500
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/missing.tar")
index_file = generate_temp_index_file(tar_file_path)
extract_dir = generate_temp_extract(tar_file_path)
equivalent_files = list(
filter(
lambda s: int(s[s.rfind("/") + 1 : s.rfind(".")]) < 2500, # noqa: 203
sorted(glob(extract_dir.name + "/*"),
key=lambda s: int(s[s.rfind("/") + 1 : s.rfind(".")])), # noqa: 203
))
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
missing_component_behavior="skip",
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
file_reader_pipeline(equivalent_files, ["jpg", "cls"],
batch_size=test_batch_size, device_id=0, num_threads=1),
test_batch_size,
math.ceil(num_samples / test_batch_size),
)
wds_pipeline = webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
missing_component_behavior="skip",
batch_size=test_batch_size,
device_id=0,
num_threads=1,
)
wds_pipeline.build()
assert_equal(list(wds_pipeline.epoch_size().values())[0], num_samples)
def test_raise_error_on_missing():
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/missing.tar")
index_file = generate_temp_index_file(tar_file_path)
wds_pipeline = webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
missing_component_behavior="error",
batch_size=test_batch_size,
device_id=0,
num_threads=1,
)
assert_raises(RuntimeError, wds_pipeline.build, glob="Underful sample detected")
def test_different_components():
num_samples = 1000
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/scrambled.tar")
index_file = generate_temp_index_file(tar_file_path)
extract_dir = generate_temp_extract(tar_file_path)
equivalent_files = glob(extract_dir.name + "/*")
equivalent_files = sorted(equivalent_files,
key=(lambda s: int(s[s.rfind("/") + 1 : s.rfind(".")]))) # noqa: 203
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "txt;cls"],
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
file_reader_pipeline(equivalent_files, ["jpg", {"txt", "cls"}],
batch_size=test_batch_size, device_id=0, num_threads=1),
test_batch_size,
math.ceil(num_samples / test_batch_size),
)
def test_dtypes():
num_samples = 100
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/sample-tar/dtypes.tar")
index_file = generate_temp_index_file(tar_file_path)
wds_pipeline = webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["float16", "int32", "float64"],
dtypes=[dali.types.FLOAT16, dali.types.INT32, dali.types.FLOAT64],
batch_size=test_batch_size,
device_id=0,
num_threads=1,
)
wds_pipeline.build()
for sample_idx in range(num_samples):
if sample_idx % test_batch_size == 0:
f16, i32, f64 = wds_pipeline.run()
assert (f16.as_array()[sample_idx % test_batch_size] == [float(sample_idx)] * 10).all()
assert (i32.as_array()[sample_idx % test_batch_size] == [int(sample_idx)] * 10).all()
assert (f64.as_array()[sample_idx % test_batch_size] == [float(sample_idx)] * 10).all()
def test_wds_sharding():
num_samples = 3000
tar_file_paths = [
os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar"),
os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-1.tar"),
os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-2.tar"),
]
index_files = [generate_temp_index_file(tar_file_path) for tar_file_path in tar_file_paths]
extract_dirs = [generate_temp_extract(tar_file_path) for tar_file_path in tar_file_paths]
equivalent_files = sum(
list(
sorted(glob(extract_dir.name +
"/*"), key=lambda s: int(s[s.rfind("/") + 1 : s.rfind(".")])) # noqa: 203
for extract_dir in extract_dirs),
[],
)
compare_pipelines(
webdataset_raw_pipeline(
tar_file_paths,
[index_file.name for index_file in index_files],
["jpg", "cls"],
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
file_reader_pipeline(
equivalent_files,
["jpg", "cls"],
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
test_batch_size,
math.ceil(num_samples / test_batch_size),
)
def test_sharding():
num_samples = 1000
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
index_file = generate_temp_index_file(tar_file_path)
extract_dir = generate_temp_extract(tar_file_path)
equivalent_files = sorted(glob(extract_dir.name + "/*"),
key=lambda s: int(s[s.rfind("/") + 1 : s.rfind(".")])) # noqa: 203
num_shards = 100
for shard_id in range(num_shards):
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
file_reader_pipeline(
equivalent_files,
["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
test_batch_size,
math.ceil(num_samples / num_shards / test_batch_size) * 2,
)
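# The pax archive below holds the same samples as the GNU tar one, but no index file is supplied
# for it, so the reader has to infer the sample layout itself; the outputs must still match.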
def test_pax_format():
num_samples = 1000
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
pax_tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/pax/devel-0.tar")
index_file = generate_temp_index_file(tar_file_path)
num_shards = 100
for shard_id in range(num_shards):
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
webdataset_raw_pipeline(
pax_tar_file_path,
None,
ext=["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
test_batch_size,
math.ceil(num_samples / num_shards / test_batch_size) * 2,
)
def test_case_sensitive_container_format():
num_samples = 1000
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
case_insensitive_tar_file_path = os.path.join(get_dali_extra_path(),
"db/webdataset/case_insensitive/devel-0.tar")
index_file = generate_temp_index_file(tar_file_path)
num_shards = 100
with assert_raises(RuntimeError, glob="Underful sample detected at"):
for shard_id in range(num_shards):
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
webdataset_raw_pipeline(
case_insensitive_tar_file_path,
None,
ext=["jpg", "cls"],
missing_component_behavior="error",
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
test_batch_size,
math.ceil(num_samples / num_shards / test_batch_size) * 2,
)
def test_case_sensitive_arg_format():
num_samples = 1000
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
index_file = generate_temp_index_file(tar_file_path)
num_shards = 100
with assert_raises(RuntimeError, glob="Underful sample detected at"):
for shard_id in range(num_shards):
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
ext=["Jpg", "cls"],
missing_component_behavior="error",
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
test_batch_size,
math.ceil(num_samples / num_shards / test_batch_size) * 2,
)
def test_case_insensitive_container_format():
num_samples = 1000
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
case_insensitive_tar_file_path = os.path.join(get_dali_extra_path(),
"db/webdataset/case_insensitive/devel-0.tar")
index_file = generate_temp_index_file(tar_file_path)
num_shards = 100
for shard_id in range(num_shards):
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
webdataset_raw_pipeline(
case_insensitive_tar_file_path,
None,
ext=["jpg", "cls"],
case_sensitive_extensions=False,
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
test_batch_size,
math.ceil(num_samples / num_shards / test_batch_size) * 2,
)
def test_case_insensitive_arg_format():
num_samples = 1000
tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
index_file = generate_temp_index_file(tar_file_path)
num_shards = 100
for shard_id in range(num_shards):
compare_pipelines(
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
webdataset_raw_pipeline(
tar_file_path,
index_file.name,
ext=["Jpg", "cls"],
case_sensitive_extensions=False,
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
test_batch_size,
math.ceil(num_samples / num_shards / test_batch_size) * 2,
)
def test_index_generation():
num_samples = 3000
tar_file_paths = [
os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar"),
os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-1.tar"),
os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-2.tar"),
]
extract_dirs = [generate_temp_extract(tar_file_path) for tar_file_path in tar_file_paths]
equivalent_files = sum(
list(
sorted(glob(extract_dir.name +
"/*"), key=lambda s: int(s[s.rfind("/") + 1 : s.rfind(".")])) # noqa: 203
for extract_dir in extract_dirs),
[],
)
num_shards = 100
for shard_id in range(num_shards):
compare_pipelines(
webdataset_raw_pipeline(
tar_file_paths,
[],
["jpg", "cls"],
missing_component_behavior="error",
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
file_reader_pipeline(
equivalent_files,
["jpg", "cls"],
num_shards=num_shards,
shard_id=shard_id,
batch_size=test_batch_size,
device_id=0,
num_threads=1,
),
test_batch_size,
math.ceil(num_samples / num_shards / test_batch_size) * 2,
)
|
68b211e8c998cf85dfe9f6050666bf307bf54e15
|
341414ede1071e23dd3870e291b6f74f1d02a3bb
|
/hickle/tests/test_legacy_load.py
|
7c0222433f5a7c1e0ccc168562a7079ed63eb479
|
[
"MIT"
] |
permissive
|
telegraphic/hickle
|
2ef897a9aafee1b1e5c7dd715beda8ea9d08d837
|
2b9142389417153d25bdcb6ec1eaf2fd318cee28
|
refs/heads/master
| 2023-08-20T18:25:48.898776
| 2023-07-20T01:35:48
| 2023-07-20T01:35:48
| 4,468,869
| 466
| 89
|
NOASSERTION
| 2023-07-20T01:35:49
| 2012-05-28T09:04:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,450
|
py
|
test_legacy_load.py
|
# %% IMPORTS
# Built-in imports
import glob
from os import path
import warnings
import pytest
import scipy.sparse
import numpy as np
# Package imports
import h5py
# hickle imports
import hickle as hkl
# %% FUNCTION DEFINITIONS
def test_legacy_load():
dirpath = path.dirname(__file__)
filelist = sorted(glob.glob(path.join(dirpath, 'legacy_hkls/*3_[0-9]_[0-9].hkl')))
# Make all warnings show
warnings.simplefilter("always")
for filename in filelist:
with pytest.warns(
UserWarning,
match = r"Input\s+argument\s+'file_obj'\s+appears\s+to\s+be\s+a\s+file\s+made"
r"\s+with\s+hickle\s+v3.\s+Using\s+legacy\s+load..."
):
try:
print(filename)
a = hkl.load(filename,path='test')
except Exception:
with h5py.File(filename) as a:
print(a.attrs.items())
print(a.items())
for key, item in a.items():
print(item.attrs.items())
raise
@pytest.mark.no_compression
def test_4_0_0_load():
"""
test that files created by hickle 4.0.x can be loaded by
hickle 4.1.x properly
"""
dirpath = path.dirname(__file__)
filelist = sorted(glob.glob(path.join(dirpath, 'legacy_hkls/*4.[0-9].[0-9].hkl')))
from hickle.tests.generate_legacy_4_0_0 import generate_py_object
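    # generate_py_object() rebuilds the reference object and names the legacy file it has to be
    # compared against; the remaining files only need to load without raising.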
compare_with,needs_compare = generate_py_object()
    # Strange, but without forcing garbage collection here h5py may raise what looks like a
    # race-related RuntimeError when the h5py file is closed by hickle.load(). So far this has
    # only been seen with fast successive calls of h5py methods, not in the wild.
import gc
gc.collect()
for filename in filelist:
content = hkl.load(filename)
if filename != needs_compare:
continue
for item_id,content_item,compare_item in ( (i,content[i],compare_with[i]) for i in range(len(compare_with)) ):
if scipy.sparse.issparse(content_item):
assert np.allclose(content_item.toarray(),compare_item.toarray())
continue
try:
assert content_item == compare_item
except ValueError:
assert np.all(content_item == compare_item)
# %% MAIN SCRIPT
if __name__ == "__main__":
test_legacy_load()
test_4_0_0_load()
|
7f15e9e829393765395a71b2d5d3fc662e1b4dbb
|
3dc647cd07a7361ed401e40d2b7cce8c826c8f6c
|
/Lib/test/subprocessdata/fd_status.py
|
d12bd95abee61ceeb8f944736fc9c4e87ddfbd09
|
[
"CC-BY-4.0",
"MIT",
"Python-2.0"
] |
permissive
|
RustPython/RustPython
|
5ddce4a9848b9de8c041ffd2634f83c0105d3f39
|
b864e5da1f18897fc884180b7093df5aa170024f
|
refs/heads/main
| 2023-09-04T12:38:29.458699
| 2023-09-03T12:33:42
| 2023-09-03T12:33:42
| 135,201,145
| 15,815
| 1,302
|
MIT
| 2023-09-14T08:11:45
| 2018-05-28T19:27:01
|
Rust
|
UTF-8
|
Python
| false
| false
| 835
|
py
|
fd_status.py
|
"""When called as a script, print a comma-separated list of the open
file descriptors on stdout.
Usage:
fd_status.py: check all file descriptors
fd_status.py fd1 fd2 ...: check only specified file descriptors
"""
import errno
import os
import stat
import sys
if __name__ == "__main__":
fds = []
if len(sys.argv) == 1:
try:
_MAXFD = os.sysconf("SC_OPEN_MAX")
        except Exception:
_MAXFD = 256
test_fds = range(0, _MAXFD)
else:
test_fds = map(int, sys.argv[1:])
for fd in test_fds:
try:
st = os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
continue
raise
# Ignore Solaris door files
if not stat.S_ISDOOR(st.st_mode):
fds.append(fd)
print(','.join(map(str, fds)))
|
eee41147c28e3cbee6aa3d78e0969aa066ad3a51
|
636849fc7edd9dcb095cf3410a121ab37de69f02
|
/SoftLayer/CLI/hardware/notification_add.py
|
3592a9d24519f478af2f24acb5cb0d4be753f990
|
[
"MIT"
] |
permissive
|
softlayer/softlayer-python
|
bcb09306c3367fdbd2f1407f770c4959729b074c
|
5798373055d9f34dfd531d81638a64d0a7901a13
|
refs/heads/master
| 2023-08-23T19:32:36.990701
| 2023-08-21T03:29:44
| 2023-08-21T03:29:44
| 622,291
| 126
| 182
|
MIT
| 2023-09-14T15:04:48
| 2010-04-21T20:36:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
notification_add.py
|
"""Create a user hardware notification entry."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('identifier')
@click.option('--users', multiple=True, required=True,
help='UserId to be notified on monitoring failure.')
@environment.pass_env
def cli(env, identifier, users):
"""Create a user hardware notification entry."""
hardware = SoftLayer.HardwareManager(env.client)
    table = formatting.KeyValueTable(['Id', 'Hostname', 'Username', 'Email', 'FirstName', 'LastName'])
table.align['Id'] = 'r'
table.align['Username'] = 'l'
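    # Add a monitoring-failure notification entry for every requested user id and collect the
    # resulting rows for display.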
for user in users:
notification = hardware.add_notification(identifier, user)
if notification:
table.add_row([notification['id'], notification['hardware']['fullyQualifiedDomainName'],
notification['user']['username'], notification['user']['email'],
notification['user']['firstName'], notification['user']['lastName']])
else:
raise exceptions.CLIAbort(f"User not found: {user}.")
env.fout(table)
|
9289c5be9120d6769c5caeaa0a347b690e23b708
|
d0dcd3d2dd1f561a4fa52910884acdaa15a69a2b
|
/startop/scripts/app_startup/lib/app_runner_test.py
|
33d233b03aab8cf985a5d010b1e77e9486c68b13
|
[
"LicenseRef-scancode-unicode",
"Apache-2.0"
] |
permissive
|
LineageOS/android_frameworks_base
|
dfd3aebe9e241e073ba42986eaa44cd78878ea75
|
0101ae8a47ff484d43af3dde506d19cde4590f65
|
refs/heads/lineage-19.1
| 2023-08-31T16:18:48.850305
| 2023-06-03T01:09:31
| 2023-08-08T00:55:35
| 75,641,757
| 471
| 2,457
|
NOASSERTION
| 2022-10-02T20:14:41
| 2016-12-05T15:58:41
|
Java
|
UTF-8
|
Python
| false
| false
| 3,713
|
py
|
app_runner_test.py
|
# Copyright 2019, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the AppRunner."""
import os
import sys
from pathlib import Path
from app_runner import AppRunner, AppRunnerListener
from mock import Mock, call, patch
# The path is "frameworks/base/startop/scripts/"
sys.path.append(Path(os.path.realpath(__file__)).parents[2])
import lib.cmd_utils as cmd_utils
class AppRunnerTestListener(AppRunnerListener):
def preprocess(self) -> None:
    cmd_utils.run_shell_command('pre')
def postprocess(self, pre_launch_timestamp: str) -> None:
    cmd_utils.run_shell_command('post')
def metrics_selector(self, am_start_output: str,
pre_launch_timestamp: str) -> None:
return 'TotalTime=123\n'
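# A single module-level AppRunner instance, reused by all of the tests below.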
RUNNER = AppRunner(package='music',
activity='MainActivity',
compiler_filter='speed',
timeout=None,
simulate=False)
def test_configure_compiler_filter():
with patch('lib.cmd_utils.run_shell_command',
new_callable=Mock) as mock_run_shell_command:
mock_run_shell_command.return_value = (True, 'speed arm64 kUpToDate')
RUNNER.configure_compiler_filter()
calls = [call(os.path.realpath(
os.path.join(RUNNER.DIR,
'../query_compiler_filter.py')) + ' --package music')]
mock_run_shell_command.assert_has_calls(calls)
def test_parse_metrics_output():
input = 'a1=b1\nc1=d1\ne1=f1'
ret = RUNNER.parse_metrics_output(input)
assert ret == [('a1', 'b1'), ('c1', 'd1'), ('e1', 'f1')]
def _mocked_run_shell_command(*args, **kwargs):
if args[0] == 'adb shell "date -u +\'%Y-%m-%d %H:%M:%S.%N\'"':
return (True, "2019-07-02 23:20:06.972674825")
elif args[0] == 'adb shell ps | grep "music" | awk \'{print $2;}\'':
return (True, '9999')
else:
return (True, 'a1=b1\nc1=d1=d2\ne1=f1')
@patch('app_startup.lib.adb_utils.blocking_wait_for_logcat_displayed_time')
@patch('lib.cmd_utils.run_shell_command')
def test_run(mock_run_shell_command,
mock_blocking_wait_for_logcat_displayed_time):
mock_run_shell_command.side_effect = _mocked_run_shell_command
mock_blocking_wait_for_logcat_displayed_time.return_value = 123
test_listener = AppRunnerTestListener()
RUNNER.add_callbacks(test_listener)
result = RUNNER.run()
RUNNER.remove_callbacks(test_listener)
calls = [call('pre'),
call(os.path.realpath(
os.path.join(RUNNER.DIR,
'../query_compiler_filter.py')) +
' --package music'),
call('adb shell "date -u +\'%Y-%m-%d %H:%M:%S.%N\'"'),
call(
'timeout {timeout} "{DIR}/launch_application" "{package}" "{activity}"'
.format(timeout=30,
DIR=os.path.realpath(os.path.dirname(RUNNER.DIR)),
package='music',
activity='MainActivity',
timestamp='2019-07-02 23:20:06.972674825')),
call('post')
]
mock_run_shell_command.assert_has_calls(calls)
assert result == [('TotalTime', '123')]
assert len(RUNNER.listeners) == 0
|
7279936509fdc6db8ecda89cb3bf30ab6ce039a5
|
9abd182d02355ddf0b79afd4a35f7127a4a66f7a
|
/scripts/tracking/siamrpn/benchmark.py
|
607d97e6e41a480262fd9dfc727a37f39b21054e
|
[
"Apache-2.0"
] |
permissive
|
dmlc/gluon-cv
|
e1303086419a5733661d0fcb9095c09d4f2382ad
|
567775619f3b97d47e7c360748912a4fd883ff52
|
refs/heads/master
| 2023-07-19T12:02:36.824294
| 2023-01-19T00:37:33
| 2023-01-19T00:37:33
| 122,896,249
| 6,064
| 1,458
|
Apache-2.0
| 2023-01-19T00:37:35
| 2018-02-26T01:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,590
|
py
|
benchmark.py
|
""" SiamRPN benchmark
Code adapted from https://github.com/STVIR/pysot"""
from glob import glob
import argparse
import os
from multiprocessing import Pool
from tqdm import tqdm
from gluoncv.utils.metrics.tracking import OPEBenchmark
from gluoncv.data.otb.tracking import OTBTracking as OTBDataset
def parse_args():
""" benchmark test."""
parser = argparse.ArgumentParser(description='tracking evaluation')
parser.add_argument('--tracker-path', '-p', type=str, help='test result path')
parser.add_argument('--dataset', '-d', default='OTB2015', type=str, help='dataset name')
parser.add_argument('--num', '-n', default=1, type=int, help='number of thread to eval')
parser.add_argument('--tracker-prefix', '-t', type=str, help='tracker name')
parser.add_argument('--show-video-level', '-s', action='store_true')
parser.add_argument('--test-dataset', type=str, help='test_json dataset dir')
parser.set_defaults(show_video_level=False)
opt = parser.parse_args()
return opt
def main():
"""SiamRPN benchmark.
    Evaluation is based on the txt files of the tracking results; the supported benchmarks are
    Success and Precision. Currently only the OTB2015 dataset is supported.
Parameters
----------
tracker_path : str, txt of test result path.
tracker_prefix : str, model name.
test_dataset : str, Path to test label json.
"""
opt = parse_args()
tracker_dir = os.path.join(opt.tracker_path, opt.dataset)
trackers = glob(os.path.join(opt.tracker_path,
opt.dataset,
opt.tracker_prefix+'*'))
trackers = [x.split('/')[-1] for x in trackers]
assert len(trackers) > 0
opt.num = min(opt.num, len(trackers))
dataset = OTBDataset(name=opt.dataset, dataset_root=opt.test_dataset, load_img=False)
dataset.set_tracker(tracker_dir, trackers)
benchmark = OPEBenchmark(dataset)
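    # Evaluate success and precision for every tracker, spread across opt.num worker processes.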
success_ret = {}
with Pool(processes=opt.num) as pool:
for ret in tqdm(pool.imap_unordered(benchmark.eval_success, trackers),
desc='eval success', total=len(trackers), ncols=100):
success_ret.update(ret)
precision_ret = {}
with Pool(processes=opt.num) as pool:
for ret in tqdm(pool.imap_unordered(benchmark.eval_precision, trackers),
desc='eval precision', total=len(trackers), ncols=100):
precision_ret.update(ret)
benchmark.show_result(success_ret, precision_ret,
show_video_level=opt.show_video_level)
if __name__ == '__main__':
main()
|
f4ec8cfba3862056455190657a0a1b987e4c4861
|
0032d988541e85c47b5034c20ecf88220dde5a95
|
/openbook_auth/tests/views/test_proxy_auth.py
|
7d2e080707bb73a8f274e5d2b68c87eb943081af
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
OkunaOrg/okuna-api
|
eabd37fef9d2be59b590ed8d72bee084ac377997
|
f87d8e80d2f182c01dbce68155ded0078ee707e4
|
refs/heads/master
| 2022-02-04T21:31:10.577601
| 2021-12-28T18:20:39
| 2021-12-28T18:20:39
| 151,052,951
| 185
| 92
|
MIT
| 2022-01-13T01:00:40
| 2018-10-01T07:44:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,974
|
py
|
test_proxy_auth.py
|
from django.urls import reverse
from faker import Faker
from rest_framework import status
from rest_framework.test import APITestCase
import logging
from openbook_common.tests.helpers import make_authentication_headers_for_user, make_user, make_proxy_blacklisted_domain
fake = Faker()
logger = logging.getLogger(__name__)
class ProxyAuthAPITests(APITestCase):
"""
ProxyAuthAPI tests
"""
def test_header_required_for_proxy_auth(self):
"""
should return 403 if the X-Proxy-Url header is not present
"""
url = self._get_url()
user = make_user()
headers = make_authentication_headers_for_user(user)
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_proxy_auth_allows_non_blacklisted_domain(self):
"""
        should return 202 if the X-Proxy-Url url is not blacklisted
"""
url = self._get_url()
user = make_user()
headers = make_authentication_headers_for_user(user)
headers['HTTP_X_PROXY_URL'] = 'https://notblacklisted.com'
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_proxy_auth_disallows_blacklisted_domain(self):
"""
should return 403 if the X-Proxy-Url url is blacklisted
"""
url = self._get_url()
user = make_user()
headers = make_authentication_headers_for_user(user)
headers['HTTP_X_PROXY_URL'] = 'https://www.techcrunch.com'
make_proxy_blacklisted_domain(domain='techcrunch.com')
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_proxy_auth_disallows_invalid_domain(self):
"""
should return 403 if the X-Proxy-Url url is invalid
"""
url = self._get_url()
user = make_user()
headers = make_authentication_headers_for_user(user)
headers['HTTP_X_PROXY_URL'] = 'https://wwwinvalic.poptaer'
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_proxy_auth_disallows_blacklisted_root_domain(self):
"""
should disallow when calling with a blacklisted root domain and return 403
"""
url = self._get_url()
user = make_user()
headers = make_authentication_headers_for_user(user)
make_proxy_blacklisted_domain(domain='blogspot.com')
headers['HTTP_X_PROXY_URL'] = 'test.blogspot.com'
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_proxy_auth_disallows_blacklisted_subdomain_domain(self):
"""
should disallow when calling with a blacklisted subdomain domain and return 403
"""
url = self._get_url()
user = make_user()
headers = make_authentication_headers_for_user(user)
make_proxy_blacklisted_domain(domain='test.blogspot.com')
headers['HTTP_X_PROXY_URL'] = 'test.blogspot.com'
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_allows_non_blacklisted_root_domain_with_blacklisted_subdomain(self):
"""
        should allow when calling with a non-blacklisted root domain that also has a blacklisted subdomain and return 202
"""
url = self._get_url()
user = make_user()
headers = make_authentication_headers_for_user(user)
make_proxy_blacklisted_domain(domain='test.blogspot.com')
headers['HTTP_X_PROXY_URL'] = 'blogspot.com'
response = self.client.get(url, **headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def _get_url(self):
return reverse('proxy-auth')
|
4a560faaf0d5bdd24231600bd2b8b33eb54bdd34
|
9f2c4290576d17fdb4f0b05f2790a8a300576d44
|
/scripts/extract.py
|
9e5bcb35a9d91e792b01a0c8f80e4f81885e1f20
|
[
"MIT",
"CC-BY-4.0",
"LicenseRef-scancode-proprietary-license",
"CC-BY-3.0"
] |
permissive
|
facebookresearch/esm
|
1e00896197f3beaacb66bc6eece7da5df1a57e37
|
2b369911bb5b4b0dda914521b9475cad1656b2ac
|
refs/heads/main
| 2023-08-24T21:11:59.756186
| 2023-06-27T15:29:55
| 2023-06-27T15:29:55
| 291,783,502
| 2,293
| 510
|
MIT
| 2023-09-07T03:14:39
| 2020-08-31T17:41:48
|
Python
|
UTF-8
|
Python
| false
| false
| 5,161
|
py
|
extract.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import pathlib
import torch
from esm import Alphabet, FastaBatchedDataset, ProteinBertModel, pretrained, MSATransformer
def create_parser():
parser = argparse.ArgumentParser(
description="Extract per-token representations and model outputs for sequences in a FASTA file" # noqa
)
parser.add_argument(
"model_location",
type=str,
help="PyTorch model file OR name of pretrained model to download (see README for models)",
)
parser.add_argument(
"fasta_file",
type=pathlib.Path,
help="FASTA file on which to extract representations",
)
parser.add_argument(
"output_dir",
type=pathlib.Path,
help="output directory for extracted representations",
)
parser.add_argument("--toks_per_batch", type=int, default=4096, help="maximum batch size")
parser.add_argument(
"--repr_layers",
type=int,
default=[-1],
nargs="+",
help="layers indices from which to extract representations (0 to num_layers, inclusive)",
)
parser.add_argument(
"--include",
type=str,
nargs="+",
choices=["mean", "per_tok", "bos", "contacts"],
help="specify which representations to return",
required=True,
)
parser.add_argument(
"--truncation_seq_length",
type=int,
default=1022,
help="truncate sequences longer than the given value",
)
parser.add_argument("--nogpu", action="store_true", help="Do not use GPU even if available")
return parser
def run(args):
model, alphabet = pretrained.load_model_and_alphabet(args.model_location)
model.eval()
if isinstance(model, MSATransformer):
raise ValueError(
"This script currently does not handle models with MSA input (MSA Transformer)."
)
if torch.cuda.is_available() and not args.nogpu:
model = model.cuda()
print("Transferred model to GPU")
dataset = FastaBatchedDataset.from_file(args.fasta_file)
batches = dataset.get_batch_indices(args.toks_per_batch, extra_toks_per_seq=1)
data_loader = torch.utils.data.DataLoader(
dataset, collate_fn=alphabet.get_batch_converter(args.truncation_seq_length), batch_sampler=batches
)
print(f"Read {args.fasta_file} with {len(dataset)} sequences")
args.output_dir.mkdir(parents=True, exist_ok=True)
return_contacts = "contacts" in args.include
assert all(-(model.num_layers + 1) <= i <= model.num_layers for i in args.repr_layers)
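    # Map negative layer indices (e.g. -1 for the final layer) onto their positive equivalents
    # in the range [0, num_layers].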
repr_layers = [(i + model.num_layers + 1) % (model.num_layers + 1) for i in args.repr_layers]
with torch.no_grad():
for batch_idx, (labels, strs, toks) in enumerate(data_loader):
print(
f"Processing {batch_idx + 1} of {len(batches)} batches ({toks.size(0)} sequences)"
)
if torch.cuda.is_available() and not args.nogpu:
toks = toks.to(device="cuda", non_blocking=True)
out = model(toks, repr_layers=repr_layers, return_contacts=return_contacts)
logits = out["logits"].to(device="cpu")
representations = {
layer: t.to(device="cpu") for layer, t in out["representations"].items()
}
if return_contacts:
contacts = out["contacts"].to(device="cpu")
for i, label in enumerate(labels):
args.output_file = args.output_dir / f"{label}.pt"
args.output_file.parent.mkdir(parents=True, exist_ok=True)
result = {"label": label}
truncate_len = min(args.truncation_seq_length, len(strs[i]))
# Call clone on tensors to ensure tensors are not views into a larger representation
# See https://github.com/pytorch/pytorch/issues/1995
if "per_tok" in args.include:
result["representations"] = {
layer: t[i, 1 : truncate_len + 1].clone()
for layer, t in representations.items()
}
if "mean" in args.include:
result["mean_representations"] = {
layer: t[i, 1 : truncate_len + 1].mean(0).clone()
for layer, t in representations.items()
}
if "bos" in args.include:
result["bos_representations"] = {
layer: t[i, 0].clone() for layer, t in representations.items()
}
if return_contacts:
result["contacts"] = contacts[i, : truncate_len, : truncate_len].clone()
torch.save(
result,
args.output_file,
)
def main():
parser = create_parser()
args = parser.parse_args()
run(args)
if __name__ == "__main__":
main()
|
6648a3c03ed5976e198fecf51480de01a31225d5
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/io/misc/asdf/tags/table/table.py
|
4afaaf3b5fa078b62322e430b2f93c15a3a42c95
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,918
|
py
|
table.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from asdf import tagged
from asdf import yamlutil
from asdf.tags.core.ndarray import NDArrayType
from astropy import table
from astropy.io.misc.asdf.types import AstropyType, AstropyAsdfType
class TableType:
"""
This class defines to_tree and from_tree methods that are used by both the
AstropyTableType and the AsdfTableType defined below. The behavior is
differentiated by the ``_compat`` class attribute. When ``_compat==True``,
the behavior will conform to the table schema defined by the ASDF Standard.
Otherwise, the behavior will conform to the custom table schema defined by
Astropy.
"""
_compat = False
@classmethod
def from_tree(cls, node, ctx):
# This is getting meta, guys
meta = node.get('meta', {})
# This enables us to support files that use the table definition from
# the ASDF Standard, rather than the custom one that Astropy defines.
if cls._compat:
columns = [
yamlutil.tagged_tree_to_custom_tree(col, ctx)
for col in node['columns']
]
return table.Table(columns, meta=meta)
if node.get('qtable', False):
t = table.QTable(meta=node.get('meta', {}))
else:
t = table.Table(meta=node.get('meta', {}))
for name, col in zip(node['colnames'], node['columns']):
t[name] = yamlutil.tagged_tree_to_custom_tree(col, ctx)
return t
@classmethod
def to_tree(cls, data, ctx):
columns = []
for name in data.colnames:
thiscol = data[name]
column = yamlutil.custom_tree_to_tagged_tree(thiscol, ctx)
columns.append(column)
node = dict(columns=columns)
# Files that use the table definition from the ASDF Standard (instead
# of the one defined by Astropy) will not contain these fields
if not cls._compat:
node['colnames'] = data.colnames
node['qtable'] = isinstance(data, table.QTable)
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
try:
NDArrayType.assert_equal(np.array(old), np.array(new))
except (AttributeError, TypeError, ValueError):
for col0, col1 in zip(old, new):
try:
NDArrayType.assert_equal(np.array(col0), np.array(col1))
except (AttributeError, TypeError, ValueError):
assert col0 == col1
class AstropyTableType(TableType, AstropyType):
"""
This tag class reads and writes tables that conform to the custom schema
that is defined by Astropy (in contrast to the one that is defined by the
ASDF Standard). The primary reason for differentiating is to enable the
support of Astropy mixin columns, which are not supported by the ASDF
Standard.
"""
name = 'table/table'
types = ['astropy.table.Table']
requires = ['astropy']
class AsdfTableType(TableType, AstropyAsdfType):
"""
This tag class allows Astropy to read (and write) ASDF files that use the
table definition that is provided by the ASDF Standard (instead of the
custom one defined by Astropy). This is important to maintain for
cross-compatibility.
"""
name = 'core/table'
types = ['astropy.table.Table']
requires = ['astropy']
_compat = True
class ColumnType(AstropyAsdfType):
name = 'core/column'
types = ['astropy.table.Column', 'astropy.table.MaskedColumn']
requires = ['astropy']
handle_dynamic_subclasses = True
@classmethod
def from_tree(cls, node, ctx):
data = yamlutil.tagged_tree_to_custom_tree(
node['data'], ctx)
name = node['name']
description = node.get('description')
unit = node.get('unit')
meta = node.get('meta', None)
return table.Column(
data=data._make_array(), name=name, description=description,
unit=unit, meta=meta)
@classmethod
def to_tree(cls, data, ctx):
node = {
'data': yamlutil.custom_tree_to_tagged_tree(
data.data, ctx),
'name': data.name
}
if data.description:
node['description'] = data.description
if data.unit:
node['unit'] = yamlutil.custom_tree_to_tagged_tree(
data.unit, ctx)
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
assert old.description == new.description
assert old.unit == new.unit
NDArrayType.assert_equal(np.array(old), np.array(new))
|
a3b18b3a77cd32ce389c1c87cf508d8428237f82
|
c1ab5fc6d37749cf7dd693a8f6d5475dfa54cd45
|
/kubernetes/__init__.py
|
0704940b912a928e2881f9252a8f009e6af8c611
|
[
"Apache-2.0"
] |
permissive
|
kubernetes-client/python
|
2d10e5d7c1358aa4473c1fcd54d2c5a1085cf56e
|
68d5a1479e7d735ea454021bc54e453c9b31baf7
|
refs/heads/master
| 2023-09-01T11:23:54.508420
| 2023-08-31T21:04:31
| 2023-08-31T21:04:31
| 72,473,727
| 5,792
| 3,654
|
Apache-2.0
| 2023-09-13T18:34:16
| 2016-10-31T20:08:03
|
Python
|
UTF-8
|
Python
| false
| false
| 853
|
py
|
__init__.py
|
# Copyright 2022 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__project__ = 'kubernetes'
# The version is auto-updated. Please do not edit.
__version__ = "28.0.0+snapshot"
from . import client
from . import config
from . import dynamic
from . import watch
from . import stream
from . import utils
from . import leaderelection
|
63464b74383dae369a5336cae4c4305593c8acc1
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/bert_extraction/steal_bert_classifier/embedding_perturbations/mixup_bert_embeddings.py
|
2f73a450d2c5b4a3be97e9d4ce9301ad8b3d8825
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,164
|
py
|
mixup_bert_embeddings.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculate the interpolations between two sentences using mixup on BERT embeddings using mixup (https://arxiv.org/abs/1710.09412)."""
import os
from bert import modeling
from bert import tokenization
from bert_extraction.steal_bert_classifier.embedding_perturbations import embedding_util as em_util
from bert_extraction.steal_bert_classifier.models import run_classifier as rc
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("interpolate_scheme", "beta",
"Interpolation scheme between input points.")
flags.DEFINE_float("alpha", 0.4,
"The alpha value for sampling Beta distribution.")
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"sst-2": rc.SST2Processor,
"mnli": rc.MnliProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
task_name = FLAGS.task_name.lower()
processor = processors[task_name]()
label_list = processor.get_labels()
predict_examples = processor.get_test_examples(FLAGS.predict_input_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
predict_file = os.path.join(FLAGS.output_dir,
"mixup_%s.tf_record" % FLAGS.exp_name)
rc.file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
predict_input_fn = rc.file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=False)
predict_dataset = predict_input_fn({"batch_size": FLAGS.predict_batch_size})
predict_iterator1 = predict_dataset.make_one_shot_iterator()
predict_iterator2 = predict_dataset.make_one_shot_iterator()
predict_dict1 = predict_iterator1.get_next()
predict_dict2 = predict_iterator2.get_next()
# Extract only the BERT non-contextual word embeddings, see their outputs
embed1_out, embed_var = em_util.run_bert_embeddings(
predict_dict1["input_ids"], bert_config)
embed2_out, _ = em_util.run_bert_embeddings(predict_dict2["input_ids"],
bert_config)
if FLAGS.interpolate_scheme == "beta":
# Interpolate two embeddings using samples from a beta(alpha, alpha) distro
beta_distro = tf.distributions.Beta(FLAGS.alpha, FLAGS.alpha)
interpolate = beta_distro.sample()
elif FLAGS.interpolate_scheme == "fixed":
# Interpolate two embeddings using a fixed interpolation constant
interpolate = tf.constant(FLAGS.alpha)
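  # Mixup: take a convex combination of the two embeddings, new = t * e1 + (1 - t) * e2.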
new_embed = interpolate * embed1_out + (1 - interpolate) * embed2_out
# Get nearest neighbour in embedding space for interpolated embeddings
nearest_neighbour, _ = em_util.get_nearest_neighbour(
source=new_embed, reference=embed_var)
nearest_neighbour = tf.cast(nearest_neighbour, tf.int32)
# Check whether nearest neighbour is a new word
new_vectors = tf.logical_and(
tf.not_equal(nearest_neighbour, predict_dict1["input_ids"]),
tf.not_equal(nearest_neighbour, predict_dict2["input_ids"]))
# Combine the two input masks
token_mask = tf.logical_or(
tf.cast(predict_dict1["input_mask"], tf.bool),
tf.cast(predict_dict2["input_mask"], tf.bool))
# Mask out new vectors with original tokens mask
new_vectors_masked = tf.logical_and(new_vectors, token_mask)
tvars = tf.trainable_variables()
assignment_map, _ = modeling.get_assignment_map_from_checkpoint(
tvars, FLAGS.init_checkpoint)
tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
total_score = 0
total_tokens = 0
total_steps = len(predict_examples) // FLAGS.predict_batch_size + 1
# Count the total words where new embeddings are produced via interpolation
all_predict_input1 = []
all_predict_input2 = []
all_nearest_neighbours = []
for i in range(total_steps):
tf.logging.info("%d/%d, total_score = %d / %d", i, total_steps, total_score,
total_tokens)
pd1, pd2, nn, tm, nvm = sess.run([
predict_dict1, predict_dict2, nearest_neighbour, token_mask,
new_vectors_masked
])
# populate global lists of inputs and mix-ups
all_nearest_neighbours.extend(nn.tolist())
all_predict_input1.extend(pd1["input_ids"].tolist())
all_predict_input2.extend(pd2["input_ids"].tolist())
total_score += nvm.sum()
total_tokens += tm.sum()
tf.logging.info("Total score = %d", total_score)
with tf.gfile.GFile(FLAGS.predict_output_file, "w") as f:
for pd1, pd2, nn in zip(all_predict_input1, all_predict_input2,
all_nearest_neighbours):
pd1_sent = " ".join(tokenizer.convert_ids_to_tokens(pd1))
pd2_sent = " ".join(tokenizer.convert_ids_to_tokens(pd2))
nn_sent = " ".join(tokenizer.convert_ids_to_tokens(nn))
full_line = pd1_sent + "\t" + pd2_sent + "\t" + nn_sent + "\n"
f.write(full_line)
if __name__ == "__main__":
flags.mark_flag_as_required("output_dir")
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
tf.app.run()
|
5ea9e5a97b900e4d5cfadd19380e24f2b1bb5516
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoBTag/PerformanceDB/python/Pool_MISTAGSSVHEM.py
|
18f26d600261a52e87deac6f784a1673a9248683
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 737
|
py
|
Pool_MISTAGSSVHEM.py
|
import FWCore.ParameterSet.Config as cms
from CondCore.DBCommon.CondDBCommon_cfi import *
PoolDBESSource = cms.ESSource("PoolDBESSource",
CondDBCommon,
toGet = cms.VPSet(
#
# working points
#
cms.PSet(
record = cms.string('PerformancePayloadRecord'),
tag = cms.string('BTagMISTAGSSVHEMtable_v2_offline'),
label = cms.untracked.string('BTagMISTAGSSVHEMtable_v2_offline')
),
cms.PSet(
record = cms.string('PerformanceWPRecord'),
tag = cms.string('BTagMISTAGSSVHEMwp_v2_offline'),
label = cms.untracked.string('BTagMISTAGSSVHEMwp_v2_offline')
),
))
PoolDBESSource.connect = 'frontier://FrontierProd/CMS_COND_31X_PHYSICSTOOLS'
|
8463ccd223e8ae8169d898631fb35733b4635923
|
29b62d060fcb01eca2e319b70f4ca7b5c259d7c4
|
/alita/serve/server.py
|
06ec05cf7d0b2b9abad70154422625bf44f7b720
|
[] |
no_license
|
dwpy/alita
|
eb4d58372b0d9d2988e63656511c61d8d6f88990
|
1d8a1565bc771e5ff16b454147cb44eadd19d237
|
refs/heads/master
| 2021-06-17T20:42:23.402187
| 2019-06-12T08:53:19
| 2019-06-12T08:53:19
| 174,462,948
| 119
| 2
| null | 2021-06-11T17:49:16
| 2019-03-08T03:31:05
|
Python
|
UTF-8
|
Python
| false
| false
| 19,539
|
py
|
server.py
|
import os
import logging
import httptools
import signal
import asyncio
import functools
import traceback
from datetime import datetime
from alita.serve.utils import *
from urllib.parse import unquote
from websockets import handshake, InvalidHandshake, WebSocketCommonProtocol
HIGH_WATER_LIMIT = 65536
class ServiceUnavailable:
def __init__(self, body=None, status=503, headers=None, content_type="text/plain"):
self.content_type = content_type
self.body = self._encode_body(body)
self.status = status
self._cookies = None
self.headers = headers or {}
def _encode_body(self, data):
try:
return data.encode()
except AttributeError:
return str(data or "").encode()
async def __call__(self, environ, on_response):
on_response(self)
def output(self, version="1.1", keep_alive=False, keep_alive_timeout=None):
# This is all returned in a kind-of funky way
# We tried to make this as fast as possible in pure python
body, timeout_header = b"", b""
if keep_alive and keep_alive_timeout is not None:
timeout_header = b"Keep-Alive: %d\r\n" % keep_alive_timeout
self.headers["Content-Type"] = self.headers.get(
"Content-Type", self.content_type
)
headers = self._parse_headers()
description = b'Service Unavailable'
return (
b"HTTP/%b %d %b\r\n" b"Connection: %b\r\n" b"%b" b"%b\r\n" b"%b"
) % (
version.encode(),
self.status,
description,
b"keep-alive" if keep_alive else b"close",
timeout_header,
headers,
body,
)
def _parse_headers(self):
headers = b""
for name, value in self.headers.items():
try:
headers += b"%b: %b\r\n" % (
name.encode(),
value.encode("utf-8"),
)
except AttributeError:
headers += b"%b: %b\r\n" % (
str(name).encode(),
str(value).encode("utf-8"),
)
return headers
class HttpProtocol(asyncio.Protocol):
DEFAULT_TYPE = "http"
DEFAULT_VERSION = "1.1"
def __init__(self, app, config, server_state):
self.app = app
self.config = config
self.loop = config.loop
self.logger = config.logger
self.access_log = config.access_log and (self.logger.level <= logging.INFO)
self.protocol = config.protocol
self.root_path = config.root_path
self.limit_concurrency = config.limit_concurrency
self.keep_alive_timeout = config.keep_alive_timeout
self.debug = config.debug
# Timeouts
self._request_timeout_handler = None
self._response_timeout_handler = None
self.timeout_keep_alive_task = None
self.timeout_keep_alive = config.timeout_keep_alive
# Global state
self.server_state = server_state
self.connections = server_state.connections
self.tasks = server_state.tasks
self.default_headers = server_state.default_headers + config.default_headers
# Per-connection state
self.transport = None
self.server = None
self.client = None
self.scheme = None
self.parser = None
self.websocket = None
self.pipeline = []
# Per-request state
self.url = None
self.environ = None
self.body = b""
self.more_body = True
self.headers = []
self.expect_100_continue = False
self.message_event = asyncio.Event()
self.message_event.set()
# Protocol interface
def connection_made(self, transport):
self.connections.add(self)
self.transport = transport
self.server = get_local_addr(transport)
self.client = get_remote_addr(transport)
self.scheme = "https" if is_ssl(transport) else "http"
if self.logger.level <= logging.DEBUG:
self.logger.debug("%s - Connected", self.client)
self._request_timeout_handler = self.loop.call_later(
self.config.request_timeout, self.request_timeout_callback
)
def connection_lost(self, exc):
self.connections.discard(self)
if self.logger.level <= logging.DEBUG:
self.logger.debug("%s - Disconnected", self.client)
self.message_event.set()
def request_timeout_callback(self):
self.shutdown()
def response_timeout_callback(self):
self.shutdown()
def cancel_timeout_keep_alive_task(self):
if self.timeout_keep_alive_task is not None:
self.timeout_keep_alive_task.cancel()
self.timeout_keep_alive_task = None
if self._request_timeout_handler is not None:
self._request_timeout_handler.cancel()
self._request_timeout_handler = None
if self._response_timeout_handler is not None:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
def data_received(self, data):
self.cancel_timeout_keep_alive_task()
try:
if self.parser is None:
self.headers = []
self.parser = httptools.HttpRequestParser(self)
self.parser.feed_data(data)
except httptools.parser.errors.HttpParserError as exc:
msg = "Invalid HTTP request received."
if self.debug:
msg += "\n" + traceback.format_exc()
self.logger.error(msg)
self.on_response(msg)
except httptools.HttpParserUpgrade as exc:
#self.handle_upgrade()
pass
def handle_upgrade(self):
upgrade_value = None
for name, value in self.headers:
if name == b"upgrade":
upgrade_value = value.lower()
if upgrade_value != b"websocket" or self.protocol is None:
msg = "Unsupported upgrade request."
self.logger.warning(msg)
content = [STATUS_TEXT[400]]
for name, value in self.default_headers:
content.extend([name, b": ", value, b"\r\n"])
content.extend(
[
b"content-type: text/plain; charset=utf-8\r\n",
b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n",
b"connection: close\r\n",
b"\r\n",
msg.encode("ascii"),
]
)
self.transport.write(b"".join(content))
self.transport.close()
return
self.connections.discard(self)
method = self.environ["method"].encode()
output = [method, b" ", self.url, b" HTTP/1.1\r\n"]
for name, value in self.environ["headers"]:
output += [name, b": ", value, b"\r\n"]
output.append(b"\r\n")
protocol = self.protocol(
app=self.app,
config=self.config,
server_state=self.server_state
)
protocol.connection_made(self.transport)
protocol.data_received(b"".join(output))
self.transport.set_protocol(protocol)
# Parser callbacks
def on_url(self, url):
parsed_url = httptools.parse_url(url)
path = parsed_url.path.decode("ascii")
if "%" in path:
path = unquote(path)
self.url = url
self.expect_100_continue = False
self.environ = {
"url": url.decode(),
"parsed_url": parsed_url,
"type": self.DEFAULT_TYPE,
"http_version": self.DEFAULT_VERSION,
"server": self.server,
"client": self.client,
"scheme": self.scheme,
"ip": self.server[0],
"port": int(self.server[1]),
"path": path,
"query_string": (parsed_url.query if parsed_url.query else b"").decode(),
}
def on_header(self, name: bytes, value: bytes):
name = name.lower()
if name == b"expect" and value.lower() == b"100-continue":
self.expect_100_continue = True
self.headers.append((name.decode("ascii"), value.decode("ascii")))
def on_headers_complete(self):
http_version = self.parser.get_http_version()
if http_version != self.DEFAULT_VERSION:
self.environ["http_version"] = http_version
self.environ.update(
protocol=self,
method=self.parser.get_method().decode("ascii"),
transport=self.transport,
logger=self.logger,
root_path=self.root_path,
access_log=self.access_log,
expect_100_continue=self.expect_100_continue,
keep_alive=http_version != "1.0",
keep_alive_timeout=self.keep_alive_timeout,
headers=self.headers,
default_headers=self.default_headers,
)
def on_body(self, body: bytes):
self.body += body
if len(self.body) > HIGH_WATER_LIMIT:
self.transport.pause_reading()
self.message_event.set()
def on_message_complete(self):
self.more_body = False
self.message_event.set()
self.cancel_timeout_keep_alive_task()
self.environ.update(body=self.body.decode())
self.process_request()
def log_response(self, response):
if self.access_log:
self.logger.info('[access] %s - - [%s] "%s %s" %s -',
self.environ['ip'],
datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
self.environ['method'],
self.environ['path'],
response.status)
def process_request(self):
# Standard case - start processing the request.
# Handle 503 responses when 'limit_concurrency' is exceeded.
self._response_timeout_handler = self.loop.call_later(
self.config.response_timeout, self.response_timeout_callback
)
if self.limit_concurrency is not None and (
len(self.connections) >= self.limit_concurrency
or len(self.tasks) >= self.limit_concurrency
):
app = ServiceUnavailable()
message = "Exceeded concurrency limit."
self.logger.warning(message)
else:
app = self.app
task = self.loop.create_task(app(self.environ, self.on_response))
task.add_done_callback(self.tasks.discard)
self.tasks.add(task)
async def on_response(self, response):
# Callback for pipelined HTTP requests to be started.
self.server_state.total_requests += 1
if isinstance(response, str):
output_content = response
else:
response.set_protocol(self)
output_content = await response.output(
self.environ["http_version"],
self.environ["keep_alive"],
self.environ["keep_alive_timeout"]
)
self.transport.write(output_content)
self.log_response(response)
self.cancel_timeout_keep_alive_task()
if not self.transport.is_closing():
self.transport.close()
self.transport = None
else:
# Set a short Keep-Alive timeout.
self.timeout_keep_alive_task = self.loop.call_later(
self.timeout_keep_alive, self.timeout_keep_alive_handler
)
def shutdown(self):
"""
Called by the server to commence a graceful shutdown.
"""
if not self.transport.is_closing():
self.transport.close()
def pause_writing(self):
"""
Called by the transport when the write buffer exceeds the high water mark.
"""
self.message_event.clear()
def resume_writing(self):
"""
Called by the transport when the write buffer drops below the low water mark.
"""
self.message_event.set()
async def drain(self):
await self.message_event.wait()
def timeout_keep_alive_handler(self):
"""
Called on a keep-alive connection if no new data is received after a short delay.
"""
self.shutdown()
def push_data(self, data):
self.transport.write(data)
def close(self):
"""
Force close the connection.
"""
if self.transport is not None:
self.transport.close()
self.transport = None
class WebSocketProtocol(HttpProtocol):
def request_timeout_callback(self):
if self.websocket is None:
super().request_timeout_callback()
def response_timeout_callback(self):
if self.websocket is None:
super().response_timeout_callback()
def timeout_keep_alive_handler(self):
if self.websocket is None:
super().timeout_keep_alive_handler()
def connection_lost(self, exc):
if self.websocket is not None:
self.websocket.connection_lost(exc)
super().connection_lost(exc)
def data_received(self, data):
if self.websocket is not None:
self.websocket.data_received(data)
else:
try:
super().data_received(data)
except httptools.HttpParserUpgrade:
pass
def write_response(self, response):
if self.websocket is not None:
self.transport.close()
else:
super().on_response(response)
async def websocket_handshake(self, request, subprotocols=None):
headers = {}
try:
key = handshake.check_request(request.headers)
handshake.build_response(headers, key)
except InvalidHandshake:
msg = "Invalid websocket request received."
if self.debug:
msg += "\n" + traceback.format_exc()
self.logger.error(msg)
self.on_response(msg)
raise RuntimeError(msg)
subprotocol = None
if subprotocols and "Sec-Websocket-Protocol" in request.headers:
# select a subprotocol
client_subprotocols = [
p.strip()
for p in request.headers["Sec-Websocket-Protocol"].split(",")
]
for p in client_subprotocols:
if p in subprotocols:
subprotocol = p
headers["Sec-Websocket-Protocol"] = subprotocol
break
# write the 101 response back to the client
rv = b"HTTP/1.1 101 Switching Protocols\r\n"
for k, v in headers.items():
rv += k.encode("utf-8") + b": " + v.encode("utf-8") + b"\r\n"
rv += b"\r\n"
request.transport.write(rv)
# hook up the websocket protocol
self.websocket = WebSocketCommonProtocol(
timeout=self.config.ws_timeout,
max_size=self.config.ws_max_size,
max_queue=self.config.ws_max_queue,
read_limit=self.config.ws_read_limit,
write_limit=self.config.ws_write_limit,
)
self.websocket.subprotocol = subprotocol
self.websocket.connection_made(request.transport)
self.websocket.connection_open()
return self.websocket
class ServerState:
"""
Shared servers state that is available between all protocol instances.
"""
def __init__(self, total_requests=0, connections=None, tasks=None, default_headers=None):
self.total_requests = total_requests
self.connections = connections or set()
self.tasks = tasks or set()
self.default_headers = default_headers or []
class Server(object):
def __init__(self, app, config, server_state=None):
self.app = app
self.config = config
self.started = False
self.loop = config.loop
self.logger = config.logger
self.socket = config.socket
self.servers = []
self.server_state = server_state or ServerState()
self.app.loop = self.loop
if self.config.debug:
self.loop.set_debug(True)
def install_signal_handlers(self):
try:
for sig in (signal.SIGINT, signal.SIGTERM):
self.loop.add_signal_handler(sig, self.loop.stop)
except NotImplementedError:
self.logger.warning("loop.add_signal_handler not implemented on this platform.")
def run(self):
protocol = self.config.protocol
if protocol is None:
protocol = WebSocketProtocol if self.app.is_websocket else HttpProtocol
server = functools.partial(
protocol,
app=self.app,
config=self.config,
server_state=self.server_state
)
asyncio_server_kwargs = (
self.config.asyncio_server_kwargs if self.config.asyncio_server_kwargs else {}
)
server_coroutine = self.loop.create_server(
server,
self.config.host,
self.config.port,
ssl=self.config.ssl,
reuse_port=self.config.reuse_port,
sock=self.socket,
backlog=self.config.backlog,
**asyncio_server_kwargs
)
if self.config.run_async:
return server_coroutine
try:
http_server = self.loop.run_until_complete(server_coroutine)
except BaseException:
self.logger.exception("Unable to start server")
return
self.install_signal_handlers()
pid = os.getpid()
try:
self.started = True
self.servers = [server]
self.logger.info("Starting worker [%s]", pid)
message = "Server running on http://%s:%d (Press CTRL+C to quit)"
self.logger.info(message % (self.config.host, self.config.port))
self.loop.run_forever()
finally:
self.logger.info("Stopping worker [%s]", pid)
# Wait for event loop to finish and all connections to drain
http_server.close()
self.loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
for connection in self.server_state.connections:
connection.shutdown()
            # Graceful shutdown: honour graceful_shutdown_timeout instead of letting connections
            # hang forever, so roughly track the elapsed time ourselves.
start_shutdown = 0
while self.server_state.connections and (
start_shutdown < self.config.graceful_shutdown_timeout):
self.loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
coros = []
for conn in self.server_state.connections:
if hasattr(conn, "websocket") and conn.websocket:
coros.append(conn.websocket.close_connection())
else:
conn.close()
_shutdown = asyncio.gather(*coros, loop=self.loop)
self.loop.run_until_complete(_shutdown)
self.loop.close()
__all__ = [
"HttpProtocol",
"Server",
"ServerState"
]
|
3a49ba44fb6bbc19202f2c2e8b98671ff0f93d14
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/inv/migrations/0029_interface_profile_match_label.py
|
ffd4479b399ab6023755deed3272fdd125509e4e
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,095
|
py
|
0029_interface_profile_match_label.py
|
# ----------------------------------------------------------------------
# Add InterfaceProfile match label to interfaces
# ----------------------------------------------------------------------
# Copyright (C) 2007-2022 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
import bson
from pymongo import UpdateMany, InsertOne
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
depends_on = [("inv", "0021_labels")]
def migrate(self):
bulk = []
labels = []
labels_bulk = [
InsertOne(
{
"_id": bson.ObjectId(),
"name": "noc::interface_profile::*",
"description": "Match Label for InterfaceProfile",
"bg_color1": 15105570,
"bg_color2": 15965202,
"is_protected": False,
"propagate": True,
}
)
]
for ip in self.mongo_db["noc.interface_profiles"].find({}, {"_id": 1, "name": 1}):
labels.append(f"noc::interface_profile::{ip['name']}::=")
bulk += [
UpdateMany(
{"profile": ip["_id"]},
{"$addToSet": {"effective_labels": f"noc::interface_profile::{ip['name']}::="}},
)
]
for ll in labels:
labels_bulk += [
InsertOne(
{
"_id": bson.ObjectId(),
"name": ll,
"description": "Match Label for InterfaceProfile",
"bg_color1": 15105570,
"bg_color2": 15965202,
"is_protected": False,
# Label scope
"enable_agent": False,
"enable_service": False,
"enable_serviceprofile": False,
"enable_managedobject": False,
"enable_managedobjectprofile": False,
"enable_administrativedomain": False,
"enable_authprofile": False,
"enable_commandsnippet": False,
#
"enable_allocationgroup": False,
"enable_networksegment": False,
"enable_object": False,
"enable_objectmodel": False,
"enable_platform": False,
"enable_resourcegroup": False,
"enable_sensorprofile": False,
# Exposition scope
"expose_metric": False,
"expose_managedobject": False,
}
)
]
if bulk:
self.mongo_db["noc.interfaces"].bulk_write(bulk)
if labels_bulk:
self.mongo_db["labels"].bulk_write(labels_bulk)
|
adc92cccf95dc090c5ebfca098a39c6002184b04
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-CloudKit/PyObjCTest/test_ckerror.py
|
e8f579520edb97a5423081da155f90f617d6c47b
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,120
|
py
|
test_ckerror.py
|
from PyObjCTools.TestSupport import TestCase, min_os_level
import CloudKit
class TestCKError(TestCase):
def test_enum_types(self):
self.assertIsEnumType(CloudKit.CKErrorCode)
@min_os_level("10.10")
def testConstants(self):
self.assertIsInstance(CloudKit.CKErrorDomain, str)
self.assertIsInstance(CloudKit.CKPartialErrorsByItemIDKey, str)
self.assertIsInstance(CloudKit.CKRecordChangedErrorAncestorRecordKey, str)
self.assertIsInstance(CloudKit.CKRecordChangedErrorServerRecordKey, str)
self.assertIsInstance(CloudKit.CKRecordChangedErrorClientRecordKey, str)
self.assertIsInstance(CloudKit.CKErrorRetryAfterKey, str)
self.assertEqual(CloudKit.CKErrorInternalError, 1)
self.assertEqual(CloudKit.CKErrorPartialFailure, 2)
self.assertEqual(CloudKit.CKErrorNetworkUnavailable, 3)
self.assertEqual(CloudKit.CKErrorNetworkFailure, 4)
self.assertEqual(CloudKit.CKErrorBadContainer, 5)
self.assertEqual(CloudKit.CKErrorServiceUnavailable, 6)
self.assertEqual(CloudKit.CKErrorRequestRateLimited, 7)
self.assertEqual(CloudKit.CKErrorMissingEntitlement, 8)
self.assertEqual(CloudKit.CKErrorNotAuthenticated, 9)
self.assertEqual(CloudKit.CKErrorPermissionFailure, 10)
self.assertEqual(CloudKit.CKErrorUnknownItem, 11)
self.assertEqual(CloudKit.CKErrorInvalidArguments, 12)
self.assertEqual(CloudKit.CKErrorResultsTruncated, 13)
self.assertEqual(CloudKit.CKErrorServerRecordChanged, 14)
self.assertEqual(CloudKit.CKErrorServerRejectedRequest, 15)
self.assertEqual(CloudKit.CKErrorAssetFileNotFound, 16)
self.assertEqual(CloudKit.CKErrorAssetFileModified, 17)
self.assertEqual(CloudKit.CKErrorIncompatibleVersion, 18)
self.assertEqual(CloudKit.CKErrorConstraintViolation, 19)
self.assertEqual(CloudKit.CKErrorOperationCancelled, 20)
self.assertEqual(CloudKit.CKErrorChangeTokenExpired, 21)
self.assertEqual(CloudKit.CKErrorBatchRequestFailed, 22)
self.assertEqual(CloudKit.CKErrorZoneBusy, 23)
self.assertEqual(CloudKit.CKErrorBadDatabase, 24)
self.assertEqual(CloudKit.CKErrorQuotaExceeded, 25)
self.assertEqual(CloudKit.CKErrorZoneNotFound, 26)
self.assertEqual(CloudKit.CKErrorLimitExceeded, 27)
self.assertEqual(CloudKit.CKErrorUserDeletedZone, 28)
self.assertEqual(CloudKit.CKErrorTooManyParticipants, 29)
self.assertEqual(CloudKit.CKErrorAlreadyShared, 30)
self.assertEqual(CloudKit.CKErrorReferenceViolation, 31)
self.assertEqual(CloudKit.CKErrorManagedAccountRestricted, 32)
self.assertEqual(CloudKit.CKErrorParticipantMayNeedVerification, 33)
self.assertEqual(CloudKit.CKErrorServerResponseLost, 34)
self.assertEqual(CloudKit.CKErrorAssetNotAvailable, 35)
self.assertEqual(CloudKit.CKErrorAccountTemporarilyUnavailable, 36)
@min_os_level("12.0")
def testConstants12_0(self):
self.assertIsInstance(CloudKit.CKErrorUserDidResetEncryptedDataKey, str)
|
401f6ea62f9ea281afde1c4fc33bece1280f2d77
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/H2O/ArchiveH2O/future/tests/base.py
|
9f4607b691501b83cb36bc92e9fc7d66b049b85f
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 19,706
|
py
|
base.py
|
from __future__ import print_function, absolute_import
import os
import tempfile
import unittest
import sys
import re
import warnings
import io
from textwrap import dedent
from future.utils import bind_method, PY26, PY3, PY2, PY27
from future.moves.subprocess import check_output, STDOUT, CalledProcessError
if PY26:
import unittest2 as unittest
def reformat_code(code):
"""
Removes any leading \n and dedents.
"""
if code.startswith('\n'):
code = code[1:]
return dedent(code)
def order_future_lines(code):
"""
Returns the code block with any ``__future__`` import lines sorted, and
then any ``future`` import lines sorted, then any ``builtins`` import lines
sorted.
This only sorts the lines within the expected blocks.
See test_order_future_lines() for an example.
"""
# We need .splitlines(keepends=True), which doesn't exist on Py2,
# so we use this instead:
lines = code.split('\n')
uufuture_line_numbers = [i for i, line in enumerate(lines)
if line.startswith('from __future__ import ')]
future_line_numbers = [i for i, line in enumerate(lines)
if line.startswith('from future')
or line.startswith('from past')]
builtins_line_numbers = [i for i, line in enumerate(lines)
if line.startswith('from builtins')]
assert code.lstrip() == code, ('internal usage error: '
'dedent the code before calling order_future_lines()')
def mymax(numbers):
return max(numbers) if len(numbers) > 0 else 0
def mymin(numbers):
return min(numbers) if len(numbers) > 0 else float('inf')
assert mymax(uufuture_line_numbers) <= mymin(future_line_numbers), \
'the __future__ and future imports are out of order'
# assert mymax(future_line_numbers) <= mymin(builtins_line_numbers), \
# 'the future and builtins imports are out of order'
uul = sorted([lines[i] for i in uufuture_line_numbers])
sorted_uufuture_lines = dict(zip(uufuture_line_numbers, uul))
fl = sorted([lines[i] for i in future_line_numbers])
sorted_future_lines = dict(zip(future_line_numbers, fl))
bl = sorted([lines[i] for i in builtins_line_numbers])
sorted_builtins_lines = dict(zip(builtins_line_numbers, bl))
# Replace the old unsorted "from __future__ import ..." lines with the
# new sorted ones:
new_lines = []
for i in range(len(lines)):
if i in uufuture_line_numbers:
new_lines.append(sorted_uufuture_lines[i])
elif i in future_line_numbers:
new_lines.append(sorted_future_lines[i])
elif i in builtins_line_numbers:
new_lines.append(sorted_builtins_lines[i])
else:
new_lines.append(lines[i])
return '\n'.join(new_lines)
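# Added illustrative example (not part of the original module); kept as comments so
# import-time behaviour is unchanged. On a tiny hypothetical block, only the lines
# inside the __future__ group are reordered and everything else stays in place:
#
#   >>> order_future_lines('from __future__ import print_function\n'
#   ...                    'from __future__ import absolute_import\n'
#   ...                    'x = 1')
#   'from __future__ import absolute_import\nfrom __future__ import print_function\nx = 1'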
class VerboseCalledProcessError(CalledProcessError):
"""
Like CalledProcessError, but it displays more information (message and
script output) for diagnosing test failures etc.
"""
def __init__(self, msg, returncode, cmd, output=None):
self.msg = msg
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return ("Command '%s' failed with exit status %d\nMessage: %s\nOutput: %s"
% (self.cmd, self.returncode, self.msg, self.output))
class FuturizeError(VerboseCalledProcessError):
pass
class PasteurizeError(VerboseCalledProcessError):
pass
class CodeHandler(unittest.TestCase):
"""
Handy mixin for test classes for writing / reading / futurizing /
running .py files in the test suite.
"""
def setUp(self):
"""
The outputs from the various futurize stages should have the
following headers:
"""
# After stage1:
# TODO: use this form after implementing a fixer to consolidate
# __future__ imports into a single line:
# self.headers1 = """
# from __future__ import absolute_import, division, print_function
# """
self.headers1 = reformat_code("""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
""")
# After stage2 --all-imports:
# TODO: use this form after implementing a fixer to consolidate
# __future__ imports into a single line:
# self.headers2 = """
# from __future__ import (absolute_import, division,
# print_function, unicode_literals)
# from future import standard_library
# from future.builtins import *
# """
self.headers2 = reformat_code("""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
""")
self.interpreters = [sys.executable]
self.tempdir = tempfile.mkdtemp() + os.path.sep
pypath = os.getenv('PYTHONPATH')
if pypath:
self.env = {'PYTHONPATH': os.getcwd() + os.pathsep + pypath}
else:
self.env = {'PYTHONPATH': os.getcwd()}
def convert(self, code, stages=(1, 2), all_imports=False, from3=False,
reformat=True, run=True, conservative=False):
"""
Converts the code block using ``futurize`` and returns the
resulting code.
Passing stages=[1] or stages=[2] passes the flag ``--stage1`` or
        ``--stage2`` to ``futurize``. Passing both stages runs ``futurize``
with both stages by default.
If from3 is False, runs ``futurize``, converting from Python 2 to
both 2 and 3. If from3 is True, runs ``pasteurize`` to convert
from Python 3 to both 2 and 3.
Optionally reformats the code block first using the reformat() function.
If run is True, runs the resulting code under all Python
interpreters in self.interpreters.
"""
if reformat:
code = reformat_code(code)
self._write_test_script(code)
self._futurize_test_script(stages=stages, all_imports=all_imports,
from3=from3, conservative=conservative)
output = self._read_test_script()
if run:
for interpreter in self.interpreters:
_ = self._run_test_script(interpreter=interpreter)
return output
def compare(self, output, expected, ignore_imports=True):
"""
Compares whether the code blocks are equal. If not, raises an
exception so the test fails. Ignores any trailing whitespace like
blank lines.
If ignore_imports is True, passes the code blocks into the
strip_future_imports method.
If one code block is a unicode string and the other a
byte-string, it assumes the byte-string is encoded as utf-8.
"""
if ignore_imports:
output = self.strip_future_imports(output)
expected = self.strip_future_imports(expected)
if isinstance(output, bytes) and not isinstance(expected, bytes):
output = output.decode('utf-8')
if isinstance(expected, bytes) and not isinstance(output, bytes):
expected = expected.decode('utf-8')
self.assertEqual(order_future_lines(output.rstrip()),
expected.rstrip())
def strip_future_imports(self, code):
"""
Strips any of these import lines:
from __future__ import <anything>
from future <anything>
from future.<anything>
from builtins <anything>
or any line containing:
install_hooks()
or:
install_aliases()
Limitation: doesn't handle imports split across multiple lines like
this:
from __future__ import (absolute_import, division, print_function,
unicode_literals)
"""
output = []
# We need .splitlines(keepends=True), which doesn't exist on Py2,
# so we use this instead:
for line in code.split('\n'):
if not (line.startswith('from __future__ import ')
or line.startswith('from future ')
or line.startswith('from builtins ')
or 'install_hooks()' in line
or 'install_aliases()' in line
# but don't match "from future_builtins" :)
or line.startswith('from future.')):
output.append(line)
return '\n'.join(output)
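    # Added illustrative sketch (not part of the original class), kept as comments.
    # Given the hypothetical block
    #
    #   from __future__ import print_function
    #   from builtins import str
    #   x = 1
    #
    # strip_future_imports() drops the first two import lines and returns just 'x = 1'.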
def convert_check(self, before, expected, stages=(1, 2), all_imports=False,
ignore_imports=True, from3=False, run=True,
conservative=False):
"""
Convenience method that calls convert() and compare().
Reformats the code blocks automatically using the reformat_code()
function.
If all_imports is passed, we add the appropriate import headers
for the stage(s) selected to the ``expected`` code-block, so they
needn't appear repeatedly in the test code.
If ignore_imports is True, ignores the presence of any lines
beginning:
from __future__ import ...
from future import ...
for the purpose of the comparison.
"""
output = self.convert(before, stages=stages, all_imports=all_imports,
from3=from3, run=run, conservative=conservative)
if all_imports:
headers = self.headers2 if 2 in stages else self.headers1
else:
headers = ''
self.compare(output, headers + reformat_code(expected),
ignore_imports=ignore_imports)
def unchanged(self, code, **kwargs):
"""
Convenience method to ensure the code is unchanged by the
futurize process.
"""
self.convert_check(code, code, **kwargs)
def _write_test_script(self, code, filename='mytestscript.py'):
"""
Dedents the given code (a multiline string) and writes it out to
a file in a temporary folder like /tmp/tmpUDCn7x/mytestscript.py.
"""
if isinstance(code, bytes):
code = code.decode('utf-8')
# Be explicit about encoding the temp file as UTF-8 (issue #63):
with io.open(self.tempdir + filename, 'wt', encoding='utf-8') as f:
f.write(dedent(code))
def _read_test_script(self, filename='mytestscript.py'):
with io.open(self.tempdir + filename, 'rt', encoding='utf-8') as f:
newsource = f.read()
return newsource
def _futurize_test_script(self, filename='mytestscript.py', stages=(1, 2),
all_imports=False, from3=False,
conservative=False):
params = []
stages = list(stages)
if all_imports:
params.append('--all-imports')
if from3:
script = 'pasteurize.py'
else:
script = 'futurize.py'
if stages == [1]:
params.append('--stage1')
elif stages == [2]:
params.append('--stage2')
else:
assert stages == [1, 2]
if conservative:
params.append('--conservative')
# No extra params needed
# Absolute file path:
fn = self.tempdir + filename
call_args = [sys.executable, script] + params + ['-w', fn]
try:
output = check_output(call_args, stderr=STDOUT, env=self.env)
except CalledProcessError as e:
with open(fn) as f:
msg = (
'Error running the command %s\n'
'%s\n'
'Contents of file %s:\n'
'\n'
'%s') % (
' '.join(call_args),
'env=%s' % self.env,
fn,
'----\n%s\n----' % f.read(),
)
ErrorClass = (FuturizeError if 'futurize' in script else PasteurizeError)
raise ErrorClass(msg, e.returncode, e.cmd, output=e.output)
return output
def _run_test_script(self, filename='mytestscript.py',
interpreter=sys.executable):
# Absolute file path:
fn = self.tempdir + filename
try:
output = check_output([interpreter, fn],
env=self.env, stderr=STDOUT)
except CalledProcessError as e:
with open(fn) as f:
msg = (
'Error running the command %s\n'
'%s\n'
'Contents of file %s:\n'
'\n'
'%s') % (
' '.join([interpreter, fn]),
'env=%s' % self.env,
fn,
'----\n%s\n----' % f.read(),
)
if not hasattr(e, 'output'):
# The attribute CalledProcessError.output doesn't exist on Py2.6
e.output = None
raise VerboseCalledProcessError(msg, e.returncode, e.cmd, output=e.output)
return output
# Decorator to skip some tests on Python 2.6 ...
skip26 = unittest.skipIf(PY26, "this test is known to fail on Py2.6")
def expectedFailurePY3(func):
if not PY3:
return func
return unittest.expectedFailure(func)
def expectedFailurePY26(func):
if not PY26:
return func
return unittest.expectedFailure(func)
def expectedFailurePY27(func):
if not PY27:
return func
return unittest.expectedFailure(func)
def expectedFailurePY2(func):
if not PY2:
return func
return unittest.expectedFailure(func)
# Renamed in Py3.3:
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
# From Py3.3:
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, unicode)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
if not hasattr(unittest.TestCase, 'assertRegex'):
bind_method(unittest.TestCase, 'assertRegex', assertRegex)
class _AssertRaisesBaseContext(object):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = warnings.catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
self._raiseFailure('"{}" does not match "{}"'.format(
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
self._raiseFailure("{} not triggered by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not triggered".format(exc_name))
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callable_obj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callable_obj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
An optional keyword argument 'msg' can be provided when assertWarns
is used as a context object.
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
return context.handle('assertWarns', callable_obj, args, kwargs)
if not hasattr(unittest.TestCase, 'assertWarns'):
bind_method(unittest.TestCase, 'assertWarns', assertWarns)
|
47436d3a5d300f28cdca3d7d9d9168b7cb8b6110
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/serve/tests/test_autoscaling_metrics.py
|
36046cb88474e2816040dd122c2cb7de4d0abfaf
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,146
|
py
|
test_autoscaling_metrics.py
|
import time
import ray
from ray import serve
from ray._private.test_utils import wait_for_condition
from ray.serve._private.autoscaling_metrics import InMemoryMetricsStore
from ray.serve._private.common import ReplicaState, DeploymentID
class TestInMemoryMetricsStore:
def test_basics(self):
s = InMemoryMetricsStore()
s.add_metrics_point({"m1": 1}, timestamp=1)
s.add_metrics_point({"m1": 2}, timestamp=2)
assert s.window_average("m1", window_start_timestamp_s=0) == 1.5
assert s.max("m1", window_start_timestamp_s=0) == 2
def test_out_of_order_insert(self):
s = InMemoryMetricsStore()
s.add_metrics_point({"m1": 1}, timestamp=1)
s.add_metrics_point({"m1": 5}, timestamp=5)
s.add_metrics_point({"m1": 3}, timestamp=3)
s.add_metrics_point({"m1": 2}, timestamp=2)
s.add_metrics_point({"m1": 4}, timestamp=4)
assert s.window_average("m1", window_start_timestamp_s=0) == 3
assert s.max("m1", window_start_timestamp_s=0) == 5
def test_window_start_timestamp(self):
s = InMemoryMetricsStore()
assert s.window_average("m1", window_start_timestamp_s=0) is None
assert s.max("m1", window_start_timestamp_s=0) is None
s.add_metrics_point({"m1": 1}, timestamp=2)
assert s.window_average("m1", window_start_timestamp_s=0) == 1
assert (
s.window_average("m1", window_start_timestamp_s=10, do_compact=False)
is None
)
def test_compaction_window(self):
s = InMemoryMetricsStore()
s.add_metrics_point({"m1": 1}, timestamp=1)
s.add_metrics_point({"m1": 2}, timestamp=2)
assert (
s.window_average("m1", window_start_timestamp_s=0, do_compact=False) == 1.5
)
s.window_average("m1", window_start_timestamp_s=1.1, do_compact=True)
# First record should be removed.
assert s.window_average("m1", window_start_timestamp_s=0, do_compact=False) == 2
def test_compaction_max(self):
s = InMemoryMetricsStore()
s.add_metrics_point({"m1": 1}, timestamp=2)
s.add_metrics_point({"m1": 2}, timestamp=1)
assert s.max("m1", window_start_timestamp_s=0, do_compact=False) == 2
s.window_average("m1", window_start_timestamp_s=1.1, do_compact=True)
assert s.window_average("m1", window_start_timestamp_s=0, do_compact=False) == 1
def test_multiple_metrics(self):
s = InMemoryMetricsStore()
s.add_metrics_point({"m1": 1, "m2": -1}, timestamp=1)
s.add_metrics_point({"m1": 2, "m2": -2}, timestamp=2)
assert s.window_average("m1", window_start_timestamp_s=0) == 1.5
assert s.max("m1", window_start_timestamp_s=0) == 2
assert s.max("m2", window_start_timestamp_s=0) == -1
def test_e2e(serve_instance):
@serve.deployment(
autoscaling_config={
"metrics_interval_s": 0.1,
"min_replicas": 1,
"max_replicas": 2,
"target_num_ongoing_requests_per_replica": 1,
"upscale_delay_s": 0,
"downscale_delay_s": 0,
"look_back_period_s": 1,
},
# We will send over a lot of queries. This will make sure replicas are
# killed quickly during cleanup.
graceful_shutdown_timeout_s=1,
max_concurrent_queries=25,
version="v1",
)
class A:
def __call__(self):
time.sleep(0.1)
handle = serve.run(A.bind())
dep_id = DeploymentID("A", "default")
[handle.remote() for _ in range(50)]
# Wait for metrics to propagate
def get_data():
data = ray.get(
serve_instance._controller._dump_autoscaling_metrics_for_testing.remote()
)[dep_id]
print(data)
return data
wait_for_condition(lambda: len(get_data()) > 0)
print("Autoscaling metrics started recording on controller.")
# Many queries should be inflight.
def last_timestamp_value_high():
data = get_data()
metrics = list(data.values())
assert len(metrics) == 2
assert metrics[0] > 0 and metrics[1] > 0
assert sum(metrics) > 25
return True
wait_for_condition(last_timestamp_value_high)
print("Confirmed there are metrics from 2 replicas, and many queries are inflight.")
def check_running_replicas(expected):
replicas = ray.get(
serve_instance._controller._dump_replica_states_for_testing.remote(dep_id)
)
running_replicas = replicas.get([ReplicaState.RUNNING])
assert len(running_replicas) == expected
return True
# After traffic stops, num replica should drop to 1
wait_for_condition(check_running_replicas, expected=1, timeout=15)
print("Num replicas dropped to 1.")
# The metrics stored on controller should only have info on the remaining replica
wait_for_condition(lambda: len(get_data()) == 1)
print("Metrics stored on the controller reduced to 1 replica.")
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", "-s", __file__]))
|
48f3fc913720773338a5653f8ccea3792d64640a
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CondFormats/EcalObjects/test/fillEcalTimeCorrections_cfg.py
|
1b0750739387989796a693a56397b84833f3dee4
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 3,083
|
py
|
fillEcalTimeCorrections_cfg.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("InitialData")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.connect = 'sqlite_file:/tmp/ecal_timebias_corrections.db'
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
interval = cms.uint64(1)
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDBCommon,
timetype = cms.untracked.string('runnumber'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('EcalTimeBiasCorrectionsRcd'),
tag = cms.string('initial')
))
)
process.timebias = cms.EDAnalyzer("EcalTimeBiasCorrectionsFillInitial",
EBtimeCorrAmplitudeBins = cms.vdouble(
7.9, 8.9, 10, 11.2, 12.5, 14.1, 15.8, 17.7, 19.9, 22.3, 25, 28.1, 31.5, 35.3, 39.7,
44.5, 49.9, 56, 62.8, 70.5, 79.1, 88.8, 99.6, 111.7, 125.4, 140.7, 157.9, 177.1, 198.7, 223,
250.2, 280.7, 315, 353.4, 396.5, 444.9, 499.2, 560.1, 628.4, 705.1, 791.1, 887.7, 996, 1117.5, 1253.9,
1406.8, 1578.5, 1771.1, 1987.2, 2229.7, 2501.8, 2807, 3149.5, 3533.8, 3895.9, 3896, 4311.8, 4837.9, 5428.2, 6090.6,
6833.7, 7667.5, 8603.1, 9652.9, 10830, 12152, 13635, 15298, 17165, 19260, 21610),
EBtimeCorrShiftBins = cms.vdouble(
-1.770, -1.770, -1.770, -1.770, -1.666, -1.430, -1.233, -1.012, -0.866, -0.736, -0.640, -0.561, -0.505, -0.452, -0.405,
-0.363, -0.335, -0.305, -0.279, -0.260, -0.239, -0.220, -0.204, -0.191, -0.186, -0.177, -0.158, -0.137, -0.126, -0.115,
-0.104, -0.096, -0.085, -0.064, -0.056, -0.036, -0.020, -0.006, -0.020, -0.009, -0.020, 0.005, 0.053, 0.076, 0.093,
0.137, 0.143, 0.171, 0.222, 0.229, 0.271, 0.298, 0.312, 0.307, 0.254 , -0.997 ,-0.859 , -0.819, -0.775, -0.589,
-0.428, -0.288, -0.434, -0.277, -0.210, -0.179, -0.134, 0.362, 0.152, -0.282, -0.382),
EEtimeCorrAmplitudeBins = cms.vdouble(
15.7, 17.6, 19.7, 22.1, 24.8, 27.9, 31.3, 35.1, 39.4, 44.2, 49.6, 55.6, 62.4, 70, 78.6,
88.1, 98.9, 111, 124.5, 139.7, 156.7, 175.9, 197.3, 221.4, 248.4, 278.7, 312.7, 350.9, 393.7, 441.7,
495.6, 556.1, 624, 700.1, 785.5, 881.4, 988.9, 1109.6, 1245, 1396.9, 1567.3, 1758.6, 1973.1, 2213.9, 2484,
2787.1, 3127.2, 3508.8, 3936.9, 4417.3, 4956.3, 5561.1, 6239.6, 7001, 7522.8, 8440.7, 9470.6, 10626),
EEtimeCorrShiftBins = cms.vdouble(
-0.896, -0.896, -0.896, -0.896, -0.563, -0.392, -0.287, -0.203, -0.135, -0.100, -0.068, -0.050, -0.060, -0.052, -0.055,
-0.050, -0.052, -0.056, -0.055, -0.056, -0.048, -0.037, -0.038, -0.037, -0.025, -0.026, -0.024, -0.013, -0.003, 0.005,
0.020, 0.026, 0.008, 0.007, -0.006, 0.024, 0.045, 0.062, 0.085, 0.088 , 0.111 , 0.139, 0.156, 0.176, 0.210,
0.242, 0.267, 0.301, 0.318, 0.278, 0.287, 0.218, 0.305, 0.245, 0.184, -0.159, -0.095, 0.037),
)
process.p = cms.Path(process.timebias)
|
627ef439da49e942b663fdecccd3beb825aae6ef
|
0a9ef5c24d912ab52b9a80ed97b5bb1240f6369e
|
/lastversion/utils.py
|
116cb3fba33d7c92bbc2c5e6a812fea7c9ab1b7b
|
[
"GPL-1.0-or-later",
"BSD-2-Clause"
] |
permissive
|
dvershinin/lastversion
|
797e7e7f548d61fc3f3e43e0b383049d409ff58a
|
e300d10d40c637fb0c405cb43926721f3048fbc4
|
refs/heads/master
| 2023-08-15T09:28:23.209254
| 2023-07-15T13:00:38
| 2023-07-15T13:00:38
| 187,117,077
| 312
| 34
|
BSD-2-Clause
| 2023-02-24T04:57:00
| 2019-05-17T00:00:09
|
Python
|
UTF-8
|
Python
| false
| false
| 10,860
|
py
|
utils.py
|
import io
import logging
import os
import platform
import re
import sys
import tarfile
import errno
import distro
import requests
import tqdm
from six.moves import urllib
log = logging.getLogger(__name__)
class ApiCredentialsError(Exception):
"""Raised when there's an API error related to credentials"""
class BadProjectError(Exception):
"""Raised when no such project exists"""
# matches os.name to known extensions that are meant *mostly* to run on it, and not other os.name-s
os_extensions = {
'nt': ('.exe', '.msi', '.msi.asc', '.msi.sha256'),
'posix': ('.tgz', '.tar.gz')
}
extension_distros = {
'deb': ['ubuntu', 'debian'],
'rpm': ['rhel', 'centos', 'fedora', 'amazon', 'cloudlinux'],
'apk': ['alpine'],
'dmg': ['darwin']
}
# matches *start* of sys.platform value to words in asset name
platform_markers = {
'win': ['windows', 'win'],
'linux': ['linux'],
'darwin': ['osx', 'darwin'],
'freebsd': ['freebsd', 'netbsd', 'openbsd']
}
# this is all too simple for now
non_amd64_markers = ['i386', 'i686', 'arm', 'arm64', '386', 'ppc64', 'armv7', 'armv7l',
'mips64', 'ppc64', 'mips64le', 'ppc64le', 'aarch64', 'armhf', 'armv7hl']
def asset_does_not_belong_to_machine(asset):
"""
Checks whether a given asset name is likely unusable on this machine
    An asset belongs to the machine as long as this returns False.
    :param asset: Asset (file) name to check
    :type asset: str
    :return: True if the asset is likely unusable on this machine, False otherwise
    :rtype: bool
"""
# replace underscore with dash so that our shiny word boundary regexes won't break
asset = asset.replace('_', '-')
# bail if asset's extension "belongs" to other OS-es (simple)
for os_name, ext in os_extensions.items():
if os.name != os_name and asset.endswith(ext):
return True
for pf, pf_words in platform_markers.items():
if not sys.platform.startswith(pf):
for pfWord in pf_words:
r = re.compile(r'\b{}(\d+)?\b'.format(pfWord), flags=re.IGNORECASE)
matches = r.search(asset)
if matches:
return True
if sys.platform.startswith('linux'):
# Weeding out non-matching Linux distros
for ext, ext_distros in extension_distros.items():
if asset.endswith("." + ext) and distro.id() not in ext_distros:
return True
# weed out non-64 bit stuff from x86_64 bit OS
# caution: may be false positive with 32 bit Python on 64-bit OS
if platform.machine() in ['x86_64', 'AMD64']:
for non_amd64_word in non_amd64_markers:
r = re.compile(r'\b{}\b'.format(non_amd64_word), flags=re.IGNORECASE)
if r.search(asset):
return True
r = re.compile(r'\barm\d+\b', flags=re.IGNORECASE)
if r.search(asset):
return True
return False
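# Added illustrative sketch (not part of the original module), kept as comments.
# Assuming an x86_64 Linux host, the checks above would roughly give:
#
#   asset_does_not_belong_to_machine('app-1.0-linux-amd64.tar.gz')   # -> False (usable)
#   asset_does_not_belong_to_machine('app-1.0-win64.exe')            # -> True  (Windows-only extension)
#   asset_does_not_belong_to_machine('app-1.0-linux-armv7l.tar.gz')  # -> True  (non-amd64 marker)
#
# The asset names are hypothetical examples, not real release files.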
# monkey patching older requests library's response class, so it can use context manager
# https://github.com/psf/requests/issues/4136
def requests_response_patched_enter(self):
return self
# noinspection PyUnusedLocal
def requests_response_patched_exit(self, *args):
self.close()
if not hasattr(requests.Response, '__exit__'):
requests.Response.__enter__ = requests_response_patched_enter
requests.Response.__exit__ = requests_response_patched_exit
def extract_appimage_desktop_file(appimage_path):
"""Extracts the desktop file from an AppImage
Args:
appimage_path (str): Path to the AppImage
    Returns:
        None. The extracted .desktop file, if found, is copied to the current directory.
"""
import shutil
import subprocess
import tempfile
temp_dir = tempfile.mkdtemp()
# Extract the contents of the AppImage file to a temporary directory
subprocess.call([appimage_path, "--appimage-extract"], cwd=temp_dir)
# Search the temporary directory for the .desktop file
desktop_file = None
for root, dirs, files in os.walk(temp_dir):
for file in files:
if file.endswith(".desktop"):
desktop_file = os.path.join(root, file)
break
if desktop_file:
break
# Copy the .desktop file to the current directory
if desktop_file:
shutil.copy(desktop_file, ".")
# Remove the temporary directory
shutil.rmtree(temp_dir)
def get_content_disposition_filename(response):
"""Get the preferred filename from the `Content-Disposition` header.
Examples:
`attachment; filename="emulationstation-de-2.0.0-x64.deb"; filename*=UTF-8''emulationstation-de-2.0.0-x64.deb`
"""
filename = None
content_disp = response.headers.get('content-disposition')
if not content_disp or not content_disp.startswith('attachment;'):
return None
for m in re.finditer(r"filename(?P<priority>\*)?=((?P<encoding>[\S-]+)'')?(?P<filename>[^;]*)", content_disp):
filename = m.group('filename')
encoding = m.group('encoding')
if encoding:
filename = urllib.parse.unquote(filename)
filename = filename.encode(encoding).decode('utf-8')
if m.group('priority'):
break
return filename
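# Added illustrative sketch (not part of the original module), using the header from
# the docstring above. Both filename= and filename*= match, but the filename*= form
# carries the (?P<priority>*) group, so the loop decodes that value and breaks there:
#
#   hdr = ('attachment; filename="emulationstation-de-2.0.0-x64.deb"; '
#          "filename*=UTF-8''emulationstation-de-2.0.0-x64.deb")
#   # get_content_disposition_filename(resp) -> 'emulationstation-de-2.0.0-x64.deb'
#   # for any response whose headers contain {'content-disposition': hdr}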
def download_file(url, local_filename=None):
"""Download a URL to the given filename.
Args:
url (str): URL to download from
local_filename (:obj:`str`, optional): Destination filename
Defaults to current directory plus base name of the URL.
Returns:
str: Destination filename, on success
"""
if local_filename is None:
local_filename = url.split('/')[-1]
try:
# NOTE the stream=True parameter below
with requests.get(url, stream=True) as r:
r.raise_for_status()
if '.' not in local_filename and 'Content-Disposition' in r.headers:
disp_filename = get_content_disposition_filename(r)
if disp_filename:
local_filename = disp_filename
# content-length may be empty, default to 0
file_size = int(r.headers.get('Content-Length', 0))
bar_size = 1024
# fetch 8 KB at a time
chunk_size = 8192
# how many bars are there in a chunk?
chunk_bar_size = chunk_size / bar_size
# bars are by KB
num_bars = int(file_size / bar_size)
# noinspection PyTypeChecker
pbar = tqdm.tqdm(
disable=None, # disable on non-TTY
total=num_bars,
unit='KB',
desc='Downloading {}'.format(local_filename),
leave=True # progressbar stays
)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
# we fetch 8 KB, so we update progress by +8x
pbar.update(chunk_bar_size)
pbar.set_description('Downloaded {}'.format(local_filename))
pbar.close()
except KeyboardInterrupt:
pbar.close()
os.remove(local_filename)
log.warning('Cancelled')
sys.exit(1)
return local_filename
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path=".", members=None):
"""Safe extract .tar.gz to workaround CVE-2007-4559. CVE-2007-4559
Args:
tar ():
path ():
members ():
"""
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception("Attempted Path Traversal in Tar File")
tar.extractall(path, members)
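# Added illustration (not part of the original module), kept as comments: a member
# named '../../etc/passwd' joins to a path that resolves outside the extraction
# directory, so is_within_directory() returns False and safe_extract() raises
# instead of ever calling tar.extractall().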
def extract_file(url):
"""Extract an archive while stripping the top level dir."""
smart_members = []
try:
with requests.get(url, stream=True) as r:
r.raise_for_status()
# Download the file in chunks and save it to a memory buffer
# content-length may be empty, default to 0
file_size = int(r.headers.get('Content-Length', 0))
bar_size = 1024
# fetch 8 KB at a time
chunk_size = 8192
# how many bars are there in a chunk?
chunk_bar_size = chunk_size / bar_size
# bars are by KB
num_bars = int(file_size / bar_size)
buffer = io.BytesIO()
# noinspection PyTypeChecker
with tqdm.tqdm(
disable=None, # disable on non-TTY
total=num_bars,
unit='KB',
desc=url.split('/')[-1]
) as pbar:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk:
buffer.write(chunk)
pbar.update(chunk_bar_size)
# Process the file in memory (e.g. extract its contents)
buffer.seek(0)
# Process the buffer (e.g. extract its contents)
mode = 'r:gz'
if url.endswith('.tar.xz'):
mode = 'r:xz'
with tarfile.open(fileobj=buffer, mode=mode) as tar_file:
all_members = tar_file.getmembers()
                if not all_members:
                    log.critical('Downloaded file is empty or not a valid archive')
                    sys.exit(1)
root_dir = all_members[0].path
root_dir_with_slash_len = len(root_dir) + 1
for member in tar_file.getmembers():
if member.path.startswith(root_dir + "/"):
member.path = member.path[root_dir_with_slash_len:]
smart_members.append(member)
safe_extract(tar_file, members=smart_members)
except KeyboardInterrupt:
pbar.close()
log.warning('Cancelled')
sys.exit(1)
def rpm_installed_version(name):
"""Get the installed version of a package with the given name.
Args:
name (str): Package name
Returns:
string: Version of the installed packaged, or None
"""
try:
import rpm
except ImportError:
return False
ts = rpm.TransactionSet()
mi = ts.dbMatch('name', name)
if mi:
for h in mi:
return h['version']
return None
def ensure_directory_exists(directory_path):
"""
Ensure that the given directory exists.
Workaround for `exist_ok=True` not being available in Python 2.7.
Args:
directory_path (str):
Returns:
"""
try:
os.makedirs(directory_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
|
ebd0ac700d0192cdab1a0208848457c9cbaa8477
|
cf35a104dabc3d3647df66aff9db32d18b002e1b
|
/alternative_wmiircs/python/pyxp/fields.py
|
c7f0d617528c6fdce9ce25c3455b1dbe9a3f9f7a
|
[
"MIT"
] |
permissive
|
0intro/wmii
|
ba2dff4be1a049c7d4475218a914585a3f810ab5
|
024f29d1058b58aa4ee6b956500a78f69355fb53
|
refs/heads/main
| 2023-07-11T18:08:43.660898
| 2023-06-25T18:57:45
| 2023-06-25T18:57:45
| 74,551,919
| 110
| 17
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,084
|
py
|
fields.py
|
from datetime import datetime
import operator
class Field(object):
idx = 0
def __init__(self):
Field.idx += 1
self.id = Field.idx
def repr(self):
return self.__class__.__name__
def __repr__(self):
if hasattr(self, 'name'):
return '<Field %s "%s">' % (self.repr(), self.name)
return super(Field, self).__repr__()
class Int(Field):
encoders = {}
decoders = {}
@classmethod
def encoder(cls, n):
if n not in cls.encoders:
exec ('def enc(n):\n' +
' assert n == n & 0x%s, "Arithmetic overflow"\n' +
' return "".join((%s,))'
) % ('ff' * n,
','.join('chr((n >> %d) & 0xff)' % (i * 8)
for i in range(0, n)))
cls.encoders[n] = enc
return cls.encoders[n]
@classmethod
def decoder(cls, n):
if n not in cls.decoders:
cls.decoders[n] = eval('lambda data, offset: ' +
'|'.join('ord(data[offset + %d]) << %d' % (i, i * 8)
for i in range(0, n)))
return cls.decoders[n]
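    # Added illustration (not in the original file), kept as comments: for size 2,
    # encoder(2) builds roughly the following little-endian packer, and decoder(2)
    # its inverse:
    #
    #   def enc(n):
    #       assert n == n & 0xffff, "Arithmetic overflow"
    #       return "".join((chr((n >> 0) & 0xff), chr((n >> 8) & 0xff)))
    #
    #   dec = lambda data, offset: ord(data[offset + 0]) << 0 | ord(data[offset + 1]) << 8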
def __init__(self, size):
super(Int, self).__init__()
self.size = size
self.encode = self.encoder(size)
self.decode = self.decoder(size)
if self.__class__ == Int:
self.marshall = self.encode
def unmarshall(self, data, offset):
return self.size, self.decode(data, offset)
def marshall(self, val):
return self.encode(val)
def repr(self):
return '%s(%d)' % (self.__class__.__name__, self.size)
class Size(Int):
def __init__(self, size, extra=0):
super(Size, self).__init__(size)
self.extra = extra
def marshall(self, val):
return lambda vals, i: self.encode(
reduce(lambda n, i: n + len(vals[i]),
range(i + 1, len(vals)),
self.extra))
class Date(Int):
def __init__(self):
super(Date, self).__init__(4)
def unmarshall(self, data, offset):
val = self.decode(data, offset)
return 4, datetime.fromtimestamp(val)
def marshall(self, val):
return self.encode(int(val.strftime('%s')))
class Data(Int):
def __init__(self, size=2):
super(Data, self).__init__(size)
def unmarshall(self, data, offset):
n = self.decode(data, offset)
offset += self.size
assert offset + n <= len(data), "String too long to unpack"
return self.size + n, data[offset:offset + n]
def marshall(self, val):
if isinstance(val, unicode):
val = val.encode('UTF-8')
return [self.encode(len(val)), val]
# Note: Py3K strings are Unicode by default. They can't store binary
# data.
class String(Data):
def unmarshall(self, data, offset):
off, val = super(String, self).unmarshall(data, offset)
return off, val.decode('UTF-8')
def marshall(self, val):
if isinstance(val, str):
# Check for valid UTF-8
            val.decode('UTF-8')
else:
val = val.encode('UTF-8')
return super(String, self).marshall(val)
class Array(Int):
def __init__(self, size, spec):
super(Array, self).__init__(size)
self.spec = spec
def unmarshall(self, data, offset):
start = offset
n = self.decode(data, offset)
offset += self.size
res = []
for i in range(0, n):
size, val = self.spec.unmarshall(data, offset)
if isinstance(val, list):
res += val
else:
res.append(val)
offset += size
return offset - start, res
def marshall(self, vals):
res = [self.encode(len(vals))]
for val in vals:
val = self.spec.marshall(val)
if isinstance(val, list):
res += val
else:
res.append(val)
return res
# vim:se sts=4 sw=4 et:
|
ae64eabd9c19d984aee4a2b6cd33855809c3e5e4
|
a115d1ea106a890a877d902efc70e3f806525de8
|
/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/inference.py
|
1f6fe2be3efb883ebd7fe86ccfd00377f1e55c5c
|
[
"Python-2.0",
"MIT"
] |
permissive
|
KaihuaTang/Scene-Graph-Benchmark.pytorch
|
7b59051e78d566f1b94836280ca91a61e2503bb4
|
4b6b71a90d4198d9dae574d42b062a5e534da291
|
refs/heads/master
| 2023-08-04T18:32:27.979660
| 2022-07-29T06:16:23
| 2022-07-29T06:16:23
| 241,830,209
| 1,002
| 261
|
MIT
| 2023-07-30T08:37:14
| 2020-02-20T08:23:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,468
|
py
|
inference.py
|
import torch
from torch import nn
class KeypointPostProcessor(nn.Module):
def __init__(self, keypointer=None):
super(KeypointPostProcessor, self).__init__()
self.keypointer = keypointer
def forward(self, x, boxes):
mask_prob = x
scores = None
if self.keypointer:
mask_prob, scores = self.keypointer(x, boxes)
assert len(boxes) == 1, "Only non-batched inference supported for now"
boxes_per_image = [box.bbox.size(0) for box in boxes]
mask_prob = mask_prob.split(boxes_per_image, dim=0)
scores = scores.split(boxes_per_image, dim=0)
results = []
for prob, box, score in zip(mask_prob, boxes, scores):
bbox = BoxList(box.bbox, box.size, mode="xyxy")
for field in box.fields():
bbox.add_field(field, box.get_field(field))
prob = PersonKeypoints(prob, box.size)
prob.add_field("logits", score)
bbox.add_field("keypoints", prob)
results.append(bbox)
return results
# TODO remove and use only the Keypointer
import numpy as np
import cv2
def heatmaps_to_keypoints(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = 0 # cfg.KRCNN.INFERENCE_MIN_SIZE
num_keypoints = maps.shape[3]
xy_preds = np.zeros((len(rois), 3, num_keypoints), dtype=np.float32)
end_scores = np.zeros((len(rois), num_keypoints), dtype=np.float32)
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
roi_map = cv2.resize(
maps[i], (roi_map_width, roi_map_height), interpolation=cv2.INTER_CUBIC
)
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
# roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
pos = roi_map.reshape(num_keypoints, -1).argmax(axis=1)
x_int = pos % w
y_int = (pos - x_int) // w
# assert (roi_map_probs[k, y_int, x_int] ==
# roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, :] = x + offset_x[i]
xy_preds[i, 1, :] = y + offset_y[i]
xy_preds[i, 2, :] = 1
end_scores[i, :] = roi_map[np.arange(num_keypoints), y_int, x_int]
return np.transpose(xy_preds, [0, 2, 1]), end_scores
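# Added worked example (not part of the original file) of the Heckbert-style
# conversion above: for an ROI of width 111.4 the heatmap is resized to
# roi_map_width = 112, width_correction = 111.4 / 112 ~= 0.995, and an argmax at
# x_int = 10 maps to x = (10 + 0.5) * 0.995 ~= 10.44 pixels right of offset_x;
# the +0.5 places the keypoint at the centre of the discrete heatmap cell.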
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.keypoint import PersonKeypoints
class Keypointer(object):
"""
Projects a set of masks in an image on the locations
specified by the bounding boxes
"""
def __init__(self, padding=0):
self.padding = padding
def __call__(self, masks, boxes):
# TODO do this properly
if isinstance(boxes, BoxList):
boxes = [boxes]
assert len(boxes) == 1
result, scores = heatmaps_to_keypoints(
masks.detach().cpu().numpy(), boxes[0].bbox.cpu().numpy()
)
return torch.from_numpy(result).to(masks.device), torch.as_tensor(scores, device=masks.device)
def make_roi_keypoint_post_processor(cfg):
keypointer = Keypointer()
keypoint_post_processor = KeypointPostProcessor(keypointer)
return keypoint_post_processor
|
aae442a6ae4ba29396a31fc309ef43feb54a47b1
|
b4b031f59c62dfd4b27b757eeb1ecd98941e04df
|
/django_drf_filepond/migrations/0010_temp_chunked_biginteger.py
|
a10043f25554dc8610da40ac5324a2ed8c6fc486
|
[
"BSD-3-Clause"
] |
permissive
|
ImperialCollegeLondon/django-drf-filepond
|
7566df0ee21211f914f9bc8196c7e82ef8e370c1
|
aec7581a25de7bc545457a4bb546f3f5a5ac805f
|
refs/heads/main
| 2023-08-02T22:03:49.895305
| 2023-04-25T10:05:50
| 2023-04-25T10:05:50
| 158,591,739
| 101
| 41
|
BSD-3-Clause
| 2023-07-24T14:29:56
| 2018-11-21T18:39:40
|
Python
|
UTF-8
|
Python
| false
| false
| 608
|
py
|
0010_temp_chunked_biginteger.py
|
# Generated by Django 3.0.5 on 2021-06-25 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_drf_filepond', '0009_temporaryuploadchunked_offset_utime'),
]
operations = [
migrations.AlterField(
model_name='temporaryuploadchunked',
name='offset',
field=models.BigIntegerField(default=0),
),
migrations.AlterField(
model_name='temporaryuploadchunked',
name='total_size',
field=models.BigIntegerField(default=0),
),
]
|
62d18606655983ded23b0eb890e0e9dec88c2e3a
|
3d589d1c56b55fbd2b45b03564b8a9442ebf142b
|
/examples/catvdog/run.py
|
da98df4ad9e30ea434cc81cb8d0a694b2c337609
|
[
"Apache-2.0"
] |
permissive
|
spotify/klio
|
1aff27412e92c9d699259e5ab1eaeb39dc3e9571
|
e625565708ed846201d2e05f782c0ce585554346
|
refs/heads/develop
| 2023-05-25T14:33:28.348335
| 2022-03-23T20:34:09
| 2022-03-23T20:34:09
| 285,928,366
| 815
| 57
|
Apache-2.0
| 2023-05-24T21:07:09
| 2020-08-07T22:02:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
run.py
|
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import apache_beam as beam
from klio.transforms import helpers
import transforms
def run(input_pcol, config):
"""Main entrypoint in running a job's transform(s).
Run any Beam transforms that need to happen after a message is
consumed from PubSub from an upstream job (if not an apex job),
and before publishing a message to any downstream job (if
needed/configured).
Args:
input_pcol: A Beam PCollection returned from
``beam.io.ReadFromPubSub``.
config (klio.KlioConfig): Job-related configuration as
defined in ``klio-job.yaml``.
Returns:
A Beam PCollection that will be passed to ``beam.io.WriteToPubSub``.
"""
output_data = (
input_pcol
| beam.ParDo(transforms.CatVDogOutputCheck()).with_outputs()
)
output_force = output_data.found | helpers.KlioFilterForce()
to_input_check = (
(output_data.not_found, output_force.process)
| beam.Flatten()
)
to_process = to_input_check | helpers.KlioGcsCheckInputExists()
return to_process.found | beam.ParDo(transforms.CatVDog())
|
bfffce116549db51539c57dc939329d911de723d
|
ca7a8f43442e2c6722ebaf03f0abe9a22575a130
|
/autolens/exc.py
|
03037d21fd55d46ddb2191415177ad64862f8517
|
[
"MIT"
] |
permissive
|
Jammy2211/PyAutoLens
|
764f2ccdb76b54eea0b4a8f2a0ae077397fb0315
|
b31b9d7c8a55d7232695761a41383cb1cc30bd76
|
refs/heads/main
| 2023-08-23T10:07:14.015683
| 2023-08-17T15:39:49
| 2023-08-17T15:39:49
| 105,440,853
| 142
| 37
|
MIT
| 2023-09-13T14:08:23
| 2017-10-01T12:33:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,099
|
py
|
exc.py
|
import autofit as af
from autofit.exc import *
from autoarray.exc import *
from autogalaxy.exc import *
class RayTracingException(af.exc.FitException):
"""
Raises exceptions associated with the `lens/ray_tracing.py` module and `Tracer` class.
    For example if the multiple image positions do not trace within a threshold of one another, in order to
resample inaccurate mass models during a model-fit.
    This exception inherits from a `FitException`. This means that if this exception is raised during a model-fit in
the analysis class's `log_likelihood_function` that model is resampled and does not terminate the code.
"""
pass
class PositionsException(af.exc.FitException):
"""
Raises exceptions associated with the positions data in the `point` module.
For example if the multiple image positions do not meet certain format requirements.
    This exception inherits from a `FitException`. This means that if this exception is raised during a model-fit in
the analysis class's `log_likelihood_function` that model is resampled and does not terminate the code.
"""
pass
class PixelizationException(af.exc.FitException):
"""
Raises exceptions associated with the `inversion/pixelization` modules and `Pixelization` classes.
For example if a `Rectangular` mesh has dimensions below 3x3.
This exception overwrites `autoarray.exc.PixelizationException` in order to add a `FitException`. This means that
if this exception is raised during a model-fit in the analysis class's `log_likelihood_function` that model
is resampled and does not terminate the code.
"""
pass
class PointExtractionException(Exception):
"""
Raises exceptions associated with the extraction of quantities in the `point` module, where the name of a
`PointSource` profile often relates to a model-component.
For example if one tries to extract a profile `point_1` but there is no corresponding `PointSource` profile
named `point_1`.
"""
pass
|
74709bf3e566cd87e6d835a4e32f419031b0b7d1
|
4f50373a5aada7b188ab8ae7ea04d3aef5c7cebf
|
/papermerge/test/contrib/admin/test_views_index.py
|
3e0d869c1382f7a6ecd555d009621db01330e29f
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
ciur/papermerge
|
f860a9ee5aa64ae51b6e37c09c37327391f87455
|
79f1d152701f984957f9dee58e8196fc3924871c
|
refs/heads/master
| 2023-04-13T17:15:28.292949
| 2022-12-12T06:56:42
| 2022-12-12T06:56:42
| 232,151,602
| 2,148
| 264
|
Apache-2.0
| 2022-04-04T18:51:49
| 2020-01-06T17:34:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,699
|
py
|
test_views_index.py
|
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from django.http import HttpResponseRedirect
from papermerge.core.models import Document
from papermerge.test.utils import (
create_root_user,
)
class AnonymouseUserIndexAccessView(TestCase):
def setUp(self):
# user exists, but not signed in
self.testcase_user = create_root_user()
self.client = Client()
def test_index(self):
ret = self.client.get(reverse('admin:index'))
self.assertEqual(
ret.status_code,
HttpResponseRedirect.status_code
)
class TestAdvancedSearchView(TestCase):
"""
AV = advanced search
"""
def setUp(self):
self.testcase_user = create_root_user()
self.client = Client()
self.client.login(testcase_user=self.testcase_user)
def test_basic_av_by_tag(self):
"""
        In advanced search the user can search by tag(s)
"""
doc1 = Document.objects.create_document(
title="doc1",
user=self.testcase_user,
page_count=2,
file_name="koko.pdf",
size='1111',
lang='ENG',
)
doc2 = Document.objects.create_document(
title="doc2",
user=self.testcase_user,
page_count=2,
file_name="kuku.pdf",
size='1111',
lang='ENG',
)
doc1.tags.add(
"green",
"blue",
tag_kwargs={'user': self.testcase_user}
)
doc2.tags.add(
"blue",
tag_kwargs={'user': self.testcase_user}
)
ret = self.client.get(
reverse('admin:search'), {'tag': 'green'}
)
self.assertEqual(
ret.status_code,
200
)
self.assertEqual(
len(ret.context['results_docs']),
1
)
doc_ = ret.context['results_docs'][0]
self.assertEqual(
doc_.id,
doc1.id
)
def test_basic_av_by_tags_op_all(self):
"""
        In advanced search the user can search by tag(s)
tags_op can be 'all' or 'any'.
tags_op=all: find all documents which contain all tags
"""
doc1 = Document.objects.create_document(
title="doc1",
user=self.testcase_user,
page_count=2,
file_name="koko.pdf",
size='1111',
lang='ENG',
)
doc2 = Document.objects.create_document(
title="doc2",
user=self.testcase_user,
page_count=2,
file_name="kuku.pdf",
size='1111',
lang='ENG',
)
doc3 = Document.objects.create_document(
title="doc3",
user=self.testcase_user,
page_count=2,
file_name="momo.pdf",
size='1111',
lang='ENG',
)
doc1.tags.add(
"green",
"blue",
tag_kwargs={'user': self.testcase_user}
)
doc2.tags.add(
"blue",
tag_kwargs={'user': self.testcase_user}
)
doc3.tags.add(
"green",
"blue",
"red",
tag_kwargs={'user': self.testcase_user}
)
base_url = reverse('admin:search')
args = "tag=green&tag=blue&tags_op=all"
url = f"{base_url}?{args}"
ret = self.client.get(url)
self.assertEqual(
ret.status_code,
200
)
self.assertEqual(
len(ret.context['results_docs']),
2
)
result_ids = set(
[doc_.id for doc_ in ret.context['results_docs']]
)
self.assertEqual(
result_ids,
set([doc1.id, doc3.id])
)
def test_basic_av_by_tags_op_any(self):
"""
        In advanced search the user can search by tag(s)
        tags_op can be 'all' or 'any'.
        tags_op=any: find all documents which contain any of the
        mentioned tags
"""
doc1 = Document.objects.create_document(
title="doc1",
user=self.testcase_user,
page_count=2,
file_name="koko.pdf",
size='1111',
lang='ENG',
)
doc2 = Document.objects.create_document(
title="doc2",
user=self.testcase_user,
page_count=2,
file_name="kuku.pdf",
size='1111',
lang='ENG',
)
doc3 = Document.objects.create_document(
title="doc3",
user=self.testcase_user,
page_count=2,
file_name="momo.pdf",
size='1111',
lang='ENG',
)
doc1.tags.add(
"red",
tag_kwargs={'user': self.testcase_user}
)
doc2.tags.add(
"green",
tag_kwargs={'user': self.testcase_user}
)
doc3.tags.add(
"blue",
tag_kwargs={'user': self.testcase_user}
)
base_url = reverse('admin:search')
args = "tag=red&tag=green&tags_op=any"
url = f"{base_url}?{args}"
ret = self.client.get(url)
self.assertEqual(
ret.status_code,
200
)
self.assertEqual(
len(ret.context['results_docs']),
2
)
result_ids = set(
[doc_.id for doc_ in ret.context['results_docs']]
)
self.assertEqual(
result_ids,
set([doc1.id, doc2.id])
)
|
c4226dc89d9d638c3b87f41527e03d879886b1c6
|
39568e19301a7a112398be542154950af25591de
|
/hw/ip/otbn/dv/otbnsim/test/conftest.py
|
3e63f98c6b62c62d8729b9873c97fcce90afa42f
|
[
"CC-BY-4.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lowRISC/opentitan
|
493995bc7cf7cb3aee486a5203af3fd62bba3bfc
|
51f6017b8425b14d5a4aa9abace8fe5a25ef08c8
|
refs/heads/master
| 2023-08-31T22:05:09.425796
| 2023-08-14T14:52:15
| 2023-08-31T20:31:13
| 204,516,692
| 2,077
| 634
|
Apache-2.0
| 2023-09-14T21:16:21
| 2019-08-26T16:30:16
|
SystemVerilog
|
UTF-8
|
Python
| false
| false
| 274
|
py
|
conftest.py
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
# Make tested code available for import
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
|
493a5bfd20a1a622b2ab11bcd26e7179a313c5a8
|
1eca7ab68f713f9134549be8cff40d953d784326
|
/empire/server/core/host_service.py
|
7bf1b11e74f388f40916ea3d3cf250e886d90c6b
|
[
"BSD-3-Clause"
] |
permissive
|
BC-SECURITY/Empire
|
65576ac931635cded054912a02ed5d02a1b41f8d
|
5b2ad2c2e9b9f996e40c484215dfea36fefc808d
|
refs/heads/main
| 2023-09-04T05:00:52.366894
| 2023-08-27T22:08:54
| 2023-08-27T22:08:54
| 199,975,883
| 3,651
| 601
|
BSD-3-Clause
| 2023-09-08T05:50:26
| 2019-08-01T04:22:31
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 404
|
py
|
host_service.py
|
from sqlalchemy.orm import Session
from empire.server.core.db import models
class HostService(object):
def __init__(self, main_menu):
self.main_menu = main_menu
@staticmethod
def get_all(db: Session):
return db.query(models.Host).all()
@staticmethod
def get_by_id(db: Session, uid: int):
return db.query(models.Host).filter(models.Host.id == uid).first()
|
21eecae92442a3d64b05931afb22add2c1060c0f
|
767c07db1fb131047af3d9b0a065b8fdc8aac9ab
|
/55-SQL/py_json.py
|
1711ed4e3eca092af590a33c4761913ce5817f08
|
[] |
no_license
|
DUanalytics/pyAnalytics
|
e52c5469da30a5f436ec0f3120d9f15fb82fd9b3
|
107a08bebe46ea51afccfeae4a666213bb405d41
|
refs/heads/master
| 2023-07-08T04:32:54.758902
| 2023-07-03T14:37:04
| 2023-07-03T14:37:04
| 202,094,535
| 394
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,272
|
py
|
py_json.py
|
#json
#-----------------------------
#%
import json
path='/duwork/downloads/pydata/ch02/usagov_bitly_data2012-03-16-1331923249.txt'
open(path).readline()
records=[json.loads(line) for line in open(path)]
records[0]['tz']
print(records[0]['tz'])
#time_zones=[rec['tz'] for rec in records] # error
time_zones=[rec['tz'] for rec in records if 'tz' in rec] #
time_zones[:10]
print(time_zones[:10])
# Counting
def get_counts(sequence):
counts={}
for x in sequence:
if x in counts:
counts[x] +=1
else:
counts[x] =1
return counts
from collections import defaultdict
def get_counts2(sequence):
counts=defaultdict(int)
for x in sequence:
        counts[x] += 1
return counts
counts= get_counts(time_zones)
print('Total Time Zones Cities', len(time_zones))
counts['America/New_York']
print('New York Count', counts['America/New_York'])
print('Sao Paulo Count', counts['America/Sao_Paulo'])
# Top Ten Time Zones and their count
def top_counts(count_dict, n=10):
value_key_pairs = [(count,tz) for tz, count in count_dict.items()]
value_key_pairs.sort()
return value_key_pairs[-n:]
print("Top Ten TZ", top_counts(counts))
from collections import Counter
counts = Counter(time_zones)
print(counts.most_common(10))
# Counting Time zones with Pandas
from pandas import DataFrame, Series
frame = DataFrame(records)
print(frame)
print(frame['tz'][:10])
tz_counts = frame['tz'].value_counts()
print(tz_counts[:10]) # Frequency Table
clean_tz = frame['tz'].fillna('Missing')
clean_tz[clean_tz==''] = 'Unknown'
tz_counts = clean_tz.value_counts()
print("No Counts", tz_counts[:10])
import numpy as np
import matplotlib as plt
import matplotlib.pyplot  # explicit import so that plt.pyplot used below resolves
plt.interactive(True)
print("Drawing Plot")
tz_counts[:10].plot(kind='barh',rot=0)
plt.interactive(True)
plt.pyplot.show()
#plt.interactive(False)
#plt.pyplot.show()
print(frame['a'][1])
print(frame['a'][50])
import pandas
results = Series([x.split()[0] for x in frame.a.dropna()])
print(results[:5])
cframe = frame[frame.a.notnull()]
operating_system = np.where(cframe['a'].str.contains('Windows'), 'Windows','Not Windows')
print(operating_system[:15])
print(cframe['a'])
by_tz_os = cframe.groupby(['tz',operating_system])
agg_counts = by_tz_os.size().unstack().fillna(0)
print(agg_counts[:10])
# Sort in Ascending Order
indexer = agg_counts.sum(1).argsort()
print(indexer[:10])
count_subset = agg_counts.take(indexer)[-10:]
#count_subset = agg_counts.take(indexer)
import matplotlib
print(count_subset)
#print(matplotlib.get_backend())
#matplotlib.use('TkAgg')
#print(matplotlib.get_backend())
#plt.interactive(False) # Change to True
count_subset.plot(kind='barh',stacked=True)
import matplotlib.pyplot as p
p.plot(range(20),range(20))
p.show()
#Out[2]: [<matplotlib.lines.Line2D object at 0xa64932c>]
p.show()
#import matplotlib.pyplot as p
#vi /home/du/.local/lib/python3.6/site-packages/matplotlib/mpl-data/matplotlibrc
results = Series([x.split()[0] for x in frame.a.dropna()])
print('Results are ', results[:5])
print('Results are --', results.value_counts()[:8])
cframe = frame[frame.a.notnull()]
operating_system = np.where(cframe['a'].str.contains('Windows'),'Windows','Non Windows')
print(operating_system[:5])
|
7e1300ef3cd69ba832ff122e005690e979b8f6a0
|
15eb68a30bd1bcd8c153ce3c8774e09ef3f4135d
|
/NightlyTests/tensorflow/eager/test_keras_transformer.py
|
32d01c62fc627fe6045264f005ebf7e1caf41792
|
[
"BSD-3-Clause"
] |
permissive
|
quic/aimet
|
77a984af68fc3c46d98c707d18a14c95a3efdacf
|
5a406e657082b6a4f6e4bf48f0e46e085cb1e351
|
refs/heads/develop
| 2023-08-21T12:51:10.500286
| 2023-08-18T18:35:39
| 2023-08-18T18:35:39
| 257,688,216
| 1,676
| 339
|
NOASSERTION
| 2023-09-08T06:59:39
| 2020-04-21T18:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 4,857
|
py
|
test_keras_transformer.py
|
# /usr/bin/env python3.8
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import json
import pytest
from tensorflow import keras
import tensorflow as tf
import numpy as np
from aimet_tensorflow.keras.quantsim import QuantizationSimModel
@pytest.mark.skip("Disable tests that requires eager execution")
def test_quantizable_mha_export_backwards_pass():
vocab_size = 20000 # Only consider the top 20k words
maxlen = 200 # Only consider the first 200 words of each movie review
embed_dim = 32 # Embedding size for each token
num_heads = 2 # Number of attention heads
ff_dim = 32 # Hidden layer size in feed forward network inside transformer
inputs = keras.layers.Input(shape=(maxlen,))
# Embedding Layer
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = keras.layers.Embedding(input_dim=maxlen, output_dim=embed_dim)(positions)
x = keras.layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)(inputs)
x = x + positions
# Transformer Block
x = keras.layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)(x, x)
x = keras.layers.Dropout(0.1)(x)
x = keras.layers.LayerNormalization(epsilon=1e-6)(x)
x = keras.layers.Dense(ff_dim, activation="relu")(x)
x = keras.layers.Dense(embed_dim)(x)
x = keras.layers.Dropout(0.1)(x)
x = keras.layers.LayerNormalization(epsilon=1e-6)(x)
# Output layers
x = keras.layers.GlobalAveragePooling1D()(x)
x = keras.layers.Dropout(0.1)(x)
x = keras.layers.Dense(20, activation="relu")(x)
x = keras.layers.Dropout(0.1)(x)
outputs = keras.layers.Dense(2, activation="softmax")(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs)
# STAGE 3 MODEL - model created using QuantSim
quantized_model = QuantizationSimModel(functional_model)
train_inputs = np.random.randint(1, 20000, (1024, 200))
train_outputs = np.random.randint(0, 2, (1024,))
val_inputs = np.random.randint(1, 20000, (256, 200))
val_outputs = np.random.randint(0, 2, (256,))
quantized_model.compute_encodings(lambda m, _: m(val_inputs), None)
quantized_model.export('./data', 'pre_qat_mha')
for wrapper in quantized_model.quant_wrappers():
for quantizer in wrapper.input_quantizers:
quantizer.enable()
for quantizer in wrapper.output_quantizers:
quantizer.enable()
with open("./data/pre_qat_mha.encodings", "r") as encodings_file:
pre_encodings = json.load(encodings_file)
quantized_model.model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
quantized_model.model.fit(
train_inputs, train_outputs, batch_size=32, epochs=1, validation_data=(val_inputs, val_outputs)
)
quantized_model.compute_encodings(lambda m, _: m(val_inputs), None)
quantized_model.export('./data', 'post_qat_mha')
with open("./data/post_qat_mha.encodings", "r") as encodings_file:
post_encodings = json.load(encodings_file)
assert pre_encodings != post_encodings
|
42ac747defdb68af125dda3849d769438813009f
|
0b134572e3ac3903ebb44df6d4138cbab9d3327c
|
/app/grandchallenge/workstations/migrations/0010_workstationimage_latest_shimmed_version.py
|
d0543be23d18f9b7555f93316391de96b726e399
|
[
"Apache-2.0"
] |
permissive
|
comic/grand-challenge.org
|
660de3bafaf8f4560317f1dfd9ae9585ec272896
|
dac25f93b395974b32ba2a8a5f9e19b84b49e09d
|
refs/heads/main
| 2023-09-01T15:57:14.790244
| 2023-08-31T14:23:04
| 2023-08-31T14:23:04
| 4,557,968
| 135
| 53
|
Apache-2.0
| 2023-09-14T13:41:03
| 2012-06-05T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 447
|
py
|
0010_workstationimage_latest_shimmed_version.py
|
# Generated by Django 3.2.13 on 2022-06-02 08:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("workstations", "0009_alter_workstation_logo"),
]
operations = [
migrations.AddField(
model_name="workstationimage",
name="latest_shimmed_version",
field=models.CharField(default="", editable=False, max_length=8),
),
]
|
e65c49e77495d83236cb468f3c91cdcb2c223a47
|
ed75170ffe743eea5f2fedd518c21b61de55f879
|
/test/bats/BATSSeedFinder_test.py
|
10fa1851571ab4c0645c77344eb4d8087c0bb868
|
[
"MIT"
] |
permissive
|
intive-DataScience/tbats
|
d294fe6d3dcb4ec0b2fc0db5e6aaaae08ade14b5
|
184bd635e1aea6bd1dd0ac7fa2339257b9ca6bdb
|
refs/heads/master
| 2023-04-30T02:25:00.961248
| 2023-04-17T10:20:31
| 2023-04-17T10:20:31
| 162,722,338
| 162
| 21
|
MIT
| 2022-12-09T07:30:53
| 2018-12-21T14:19:21
|
Python
|
UTF-8
|
Python
| false
| false
| 8,858
|
py
|
BATSSeedFinder_test.py
|
import numpy as np
import pytest
from tbats.bats.Components import Components
from tbats.abstract.ComponentMatrix import ComponentMatrix
from tbats.bats.SeedFinder import SeedFinder
class TestBATSSeedFinder(object):
@pytest.mark.parametrize(
"seasonal_periods, expected_mask",
[
[ # no periods means no mask
[], [],
],
[ # one period always produces a mask with one zero
[6], [0],
],
[ # two periods with no common divisor produce a mask of zeroes
[3, 7], [0, 0],
],
[ # If one period is a subperiod of the other, the mask contains 1 for the smaller period
[3, 5, 6, 24], [1, 0, 1, 0],
],
[ # If one period is a subperiod of the other, the mask contains 1 for the smaller period
[2, 5, 15, 16], [1, 1, 0, 0],
],
[ # If two periods have a common divisor then mask for the larger one contains this divisor
[4, 6], [0, -2],
],
[
# If more than two periods have a common divisor then mask for the largest one contains divisor from smallest period
[12, 42, 44], [0, -6, -4], # -4 instead of -2
],
[
# If more than two periods have a common divisor then mask for the larger one contains divisor from smaller period
[9, 16, 24], [0, 0, -3], # -3 instead of -4
],
[ # being a subperiod is more important than having a divisor
[4, 6, 12], [1, 1, 0],
],
[ # divisors and periods together
[4, 5, 10, 14, 15], [0, 1, -2, -2, -5],
],
[ # divisors and periods together
[7, 9, 11, 12, 22, 30, 33], [0, 0, 1, -3, -2, -3, -3],
],
[ # divisors and periods together
[7, 9, 11, 12, 22, 30, 44], [0, 0, 1, -3, 1, -3, -4],
],
]
)
def test_prepare_mask(self, seasonal_periods, expected_mask):
mask = SeedFinder.prepare_mask(seasonal_periods)
assert np.array_equal(expected_mask, mask)
@pytest.mark.parametrize(
"seasonal_periods, w_tilda, expected",
[
[ # no periods means nothing to cut
[], [1], np.zeros((0, 1)),
],
[ # cut last param for each season
[2],
[
[1, 1, 2], # alpha, 2 seasonal params
[1, 3, 4],
],
[
[1],
[3],
],
],
[ # two periods, one is a sub-period of the other, cut the sub-period out entirely
[2, 4],
[
[1, 1, 2, 0.1, 0.2, 0.3, 0.4],
[1, 3, 4, 0.5, 0.6, 0.7, 0.8],
],
[
[0.1, 0.2, 0.3],
[0.5, 0.6, 0.7],
],
],
[ # two periods with common divisor, cut out whole divisor from the larger one
[4, 6], # divisor = 2
[
[1, 1.1, 2, 3.1, 4, 0.1, 0.2, 0.3, 0.41, 0.5, 0.6],
[1, 1.2, 2, 3.2, 4, 0.1, 0.2, 0.3, 0.42, 0.5, 0.6],
[1, 1.3, 2, 3.3, 4, 0.1, 0.2, 0.3, 0.43, 0.5, 0.6],
],
[
[1.1, 2, 3.1, 0.1, 0.2, 0.3, 0.41],
[1.2, 2, 3.2, 0.1, 0.2, 0.3, 0.42],
[1.3, 2, 3.3, 0.1, 0.2, 0.3, 0.43],
],
],
[ # period 2 has a common divisor with period 6, period 2 is a sub-period of 4 and 6
[2, 4, 6],
[
[1, 0.01, 0.02, 0.11, 0.2, 0.31, 0.4, 11, 2, 3, 41, 5, 6],
[1, 0.01, 0.02, 0.12, 0.2, 0.32, 0.4, 12, 2, 3, 42, 5, 6],
],
[
[0.11, 0.2, 0.31, 11, 2, 3, 41],
[0.12, 0.2, 0.32, 12, 2, 3, 42],
],
],
]
)
def test_prepare_seasonal_params(self, seasonal_periods, w_tilda, expected):
components = Components(seasonal_periods=seasonal_periods)
w_tilda = ComponentMatrix(w_tilda, components)
adj = SeedFinder(components)
new_seasonal_params, _ = adj.prepare_seasonal_params(w_tilda)
assert np.array_equal(expected, new_seasonal_params)
@pytest.mark.parametrize(
"component_params, w_tilda, expected",
[
[
dict(), # default params
[
[1], # 1 only
[1],
],
[
[1], # 1 only
[1],
],
],
[
dict(use_trend=True),
[
[1, 1.1], # 1, beta
[1, 2.1],
[1, 3.1],
],
[
[1, 1.1], # 1, beta
[1, 2.1],
[1, 3.1],
],
],
[ # ARMA should not be included in linear regression
dict(use_arma_errors=True, p=2, q=1),
[
[1, 1.1, 1.2, 1.3], # 1, p1, p2, q1
[2, 2.1, 2.2, 2.3],
[3, 3.1, 3.2, 3.3],
],
[
[1], # 1,
[2],
[3],
],
],
[ # one season, should simply remove last seasonal parameter
dict(seasonal_periods=(3)),
[
[1, 1.1, 1.2, 1.3], # 1, s1, s2, s3
],
[
[1, 1.1, 1.2], # 1, s1, s2
],
],
[ # two seasons, where one is a sub-season of the other, sub-season should be removed from params
dict(seasonal_periods=(2, 4)),
[
[1, 1.1, 1.2, 2.1, 2.2, 2.3, 2.4], # 1, s11, s12, s21, s22, s23, s24
],
[
[1, 2.1, 2.2, 2.3], # 1, s21, s22, s23
],
],
[ # two seasons, where one has a common divisor of 2 with the other,
# params for longer period should be shrunk by the divisor=2
dict(seasonal_periods=(4, 6)),
[
[1, 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6],
],
[
[1, 1.1, 1.2, 1.3, 2.1, 2.2, 2.3, 2.4],
],
],
]
)
def test_conversions(self, component_params, w_tilda, expected):
components = Components(**component_params)
converter = SeedFinder(components)
matrix = converter.to_matrix_for_linear_regression(w_tilda)
assert np.array_equal(expected, matrix)
@pytest.mark.parametrize(
"component_params, lr_coefs, expected_x0",
[
[
dict(), # default params
[1.],
[1.],
],
[ # ARMA coefs should be added back as zeros
dict(use_trend=True, use_damped_trend=True, use_arma_errors=True, p=1, q=2),
[0.6, 0.5], # alpha, beta
[0.6, 0.5, 0., 0., 0.],
],
[ # Parameters for single season should sum to 0, last param should be added back
dict(seasonal_periods=(3)),
[0.6, 2, 4], # alpha, s1, s2
[0.6, 0, 2, -2], # alpha, s1, s2, s3
],
[ # When one season is a sub-season of other it should be added back as zeros
dict(seasonal_periods=(3, 6)),
[0.6, 2, 4, 6, 8, 10], # alpha, s21, s22, ..., s25
[0.6, 0, 0, 0, -3, -1, 1, 3, 5, -5], # alpha, s11, s12, s13, s21, ..., s25, s26
],
[ # When there is a divisor of 2 between two seasons, the longer season should receive 2 params back
dict(seasonal_periods=(4, 6)),
[0.6, 2, 4, 6, 1, 2, 3, 6], # alpha, s11, s12, s13, s21, s22, s23, ..., s24
[0.6, -1, 1, 3, -3, -1, 0, 1, 4, -2, -2], # alpha, s11, s12, s13,, s14, s21, ..., s25, s26
],
]
)
def test_back_conversions(self, component_params, lr_coefs, expected_x0):
components = Components(**component_params)
converter = SeedFinder(components)
x0 = converter.from_linear_regression_coefs_to_x0(lr_coefs)
assert np.array_equal(expected_x0, x0)
|
8ea38a43ad69538696419d4316bf86f1159d0b29
|
9a0eb3e292d57b59198c7c66a994372ced9cfa5b
|
/nodes/1.x/python/System.UserName.py
|
ac95a8e8027d8e96e5f1e00097c907ba5859cc8a
|
[
"MIT"
] |
permissive
|
andydandy74/ClockworkForDynamo
|
544ddf0893f5c0072fca7934f4e128001771f767
|
528400c667c4c3f2b51814af84e85c8fab8a8059
|
refs/heads/master
| 2023-08-19T03:07:33.489926
| 2023-08-13T04:31:17
| 2023-08-13T04:31:17
| 15,043,988
| 184
| 100
|
MIT
| 2023-09-04T18:47:40
| 2013-12-09T10:11:01
|
Python
|
UTF-8
|
Python
| false
| false
| 68
|
py
|
System.UserName.py
|
import clr
from System import Environment
OUT = Environment.UserName
|
655fd4faa8ceb97f512b4a08b9be581011fcfc14
|
279f415dd1e06c594c6c87deda57e201c73c4542
|
/test/espnet2/schedulers/cosine_anneal_warmup_restart.py
|
c27bd0e70a74392e989fdade07abab693b8789f8
|
[
"Apache-2.0"
] |
permissive
|
espnet/espnet
|
f7ba47271c1a6b1ed606dbbfb04a7f14220bb585
|
bcd20948db7846ee523443ef9fd78c7a1248c95e
|
refs/heads/master
| 2023-08-28T23:43:34.238336
| 2023-08-23T02:51:39
| 2023-08-23T02:51:39
| 114,054,873
| 7,242
| 2,244
|
Apache-2.0
| 2023-09-14T08:01:11
| 2017-12-13T00:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 650
|
py
|
cosine_anneal_warmup_restart.py
|
import pytest
import torch
from espnet2.schedulers.cosine_anneal_warmup_restart import (
CosineAnnealingWarmupRestarts,
)
@pytest.mark.parametrize("first_cycle_steps", [10, 20])
def test_CosineAnnealingWarmupRestarts(first_cycle_steps):
linear = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(linear.parameters(), 0.1)
sch = CosineAnnealingWarmupRestarts(
opt,
first_cycle_steps,
cycle_mult=1.0,
max_lr=0.1,
min_lr=0.001,
warmup_steps=0,
gamma=1.0,
)
lr = opt.param_groups[0]["lr"]
opt.step()
sch.step()
lr2 = opt.param_groups[0]["lr"]
assert lr != lr2
|
cd9b00754f1c2e3ade98b85fcda2f83bd3c6a337
|
ec5d1ad8418dd62039e1dd8d6d2129ed3d7504de
|
/mediator/python/chat_room_mediator.py
|
34ef2674c973b8c91c107761c57a7cf8e3f412cc
|
[] |
no_license
|
yusufyilmazfr/tasarim-desenleri-turkce-kaynak
|
88feba7369fd4f2609f9dfe27d314f87a5214a7b
|
f666e998247d683a9f734f8c8802ab38c7da6915
|
refs/heads/master
| 2023-09-01T11:29:07.908507
| 2023-07-31T07:08:29
| 2023-07-31T07:08:29
| 244,465,123
| 3,298
| 448
| null | 2023-08-20T10:37:03
| 2020-03-02T20:10:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 666
|
py
|
chat_room_mediator.py
|
from abstract_chat_room_mediator import AbstractChatRoomMediator
from chat_user import ChatUser
class ChatRoomMediator(AbstractChatRoomMediator):
"""
    Implements the Mediator (IChatRoomMediator) interface.
    Coordinates the communication between the Colleague (User) objects.
    Corresponds to the ConcreteMediator role in the UML diagram.
"""
def __init__(self):
self._user_dictionary = {}
def send_message(self, message: str, user_id: int):
user: ChatUser = self._user_dictionary[user_id]
user.receive_message(message)
def add_user_in_room(self, user: ChatUser):
self._user_dictionary[user.u_id] = user
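# Minimal usage sketch for the mediator above. A small stand-in participant is
# used instead of ChatUser, because ChatUser's constructor is not shown in this
# file; only the u_id attribute and the receive_message() method used by the
# mediator are assumed, and AbstractChatRoomMediator is assumed to declare only
# the two methods implemented above.
if __name__ == "__main__":
    class _DemoUser:
        def __init__(self, u_id):
            self.u_id = u_id
        def receive_message(self, message: str):
            print(f"User {self.u_id} received: {message}")
    room = ChatRoomMediator()
    room.add_user_in_room(_DemoUser(1))
    room.add_user_in_room(_DemoUser(2))
    room.send_message("hello from the mediator", 2)  # delivered to user 2 only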
|
06b067a75624a672b161513f229f74ad19254b91
|
b07a323ef3fd85b895b2804b1340c05cde04ceee
|
/dotmap/__init__.py
|
671b003c922ad91b9d296d6c70f28d9fb17988da
|
[
"MIT"
] |
permissive
|
drgrib/dotmap
|
ccb7ac45794dd7941e3b674b65355630f03bcd25
|
c7a778a95a01a7a0fd2f816a07c62de2478260c6
|
refs/heads/master
| 2023-08-27T05:04:15.168913
| 2022-04-06T16:26:33
| 2022-04-06T16:26:33
| 40,564,835
| 454
| 58
|
MIT
| 2022-04-06T16:25:06
| 2015-08-11T20:56:21
|
Python
|
UTF-8
|
Python
| false
| false
| 21,040
|
py
|
__init__.py
|
from __future__ import print_function
from collections import OrderedDict
try:
from collections.abc import MutableMapping, Iterable
except ImportError:
from collections import MutableMapping, Iterable
from json import dumps
from pprint import pprint
from sys import version_info
from inspect import ismethod
# for debugging
def here(item=None):
out = 'here'
if item != None:
out += '({})'.format(item)
print(out)
__all__ = ['DotMap']
class DotMap(MutableMapping, OrderedDict):
def __init__(self, *args, **kwargs):
self._map = OrderedDict()
self._dynamic = kwargs.pop('_dynamic', True)
self._prevent_method_masking = kwargs.pop('_prevent_method_masking', False)
_key_convert_hook = kwargs.pop('_key_convert_hook', None)
trackedIDs = kwargs.pop('_trackedIDs', {})
if args:
d = args[0]
# for recursive assignment handling
trackedIDs[id(d)] = self
src = []
if isinstance(d, MutableMapping):
src = self.__call_items(d)
elif isinstance(d, Iterable):
src = d
for k,v in src:
if self._prevent_method_masking and k in reserved_keys:
raise KeyError('"{}" is reserved'.format(k))
if _key_convert_hook:
k = _key_convert_hook(k)
if isinstance(v, dict):
idv = id(v)
if idv in trackedIDs:
v = trackedIDs[idv]
else:
trackedIDs[idv] = v
v = self.__class__(v, _dynamic=self._dynamic, _prevent_method_masking = self._prevent_method_masking, _key_convert_hook =_key_convert_hook, _trackedIDs = trackedIDs)
if type(v) is list:
l = []
for i in v:
n = i
if isinstance(i, dict):
idi = id(i)
if idi in trackedIDs:
n = trackedIDs[idi]
else:
trackedIDs[idi] = i
n = self.__class__(i, _dynamic=self._dynamic, _key_convert_hook =_key_convert_hook, _prevent_method_masking = self._prevent_method_masking)
l.append(n)
v = l
self._map[k] = v
if kwargs:
for k,v in self.__call_items(kwargs):
if self._prevent_method_masking and k in reserved_keys:
raise KeyError('"{}" is reserved'.format(k))
if _key_convert_hook:
k = _key_convert_hook(k)
self._map[k] = v
def __call_items(self, obj):
if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
return obj.iteritems()
else:
return obj.items()
def items(self):
return self.iteritems()
def iteritems(self):
return self.__call_items(self._map)
def __iter__(self):
return self._map.__iter__()
def next(self):
return self._map.next()
def __setitem__(self, k, v):
self._map[k] = v
def __getitem__(self, k):
if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
# automatically extend to new DotMap
self[k] = self.__class__()
return self._map[k]
def __setattr__(self, k, v):
if k in {'_map','_dynamic', '_ipython_canary_method_should_not_exist_', '_prevent_method_masking'}:
super(DotMap, self).__setattr__(k,v)
elif self._prevent_method_masking and k in reserved_keys:
raise KeyError('"{}" is reserved'.format(k))
else:
self[k] = v
def __getattr__(self, k):
if k.startswith('__') and k.endswith('__'):
raise AttributeError(k)
if k in {'_map','_dynamic','_ipython_canary_method_should_not_exist_'}:
return super(DotMap, self).__getattr__(k)
try:
v = super(self.__class__, self).__getattribute__(k)
return v
except AttributeError:
pass
try:
return self[k]
except KeyError:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{k}'") from None
def __delattr__(self, key):
return self._map.__delitem__(key)
def __contains__(self, k):
return self._map.__contains__(k)
def __add__(self, other):
if self.empty():
return other
else:
self_type = type(self).__name__
other_type = type(other).__name__
msg = "unsupported operand type(s) for +: '{}' and '{}'"
raise TypeError(msg.format(self_type, other_type))
def __str__(self, seen = None):
items = []
seen = {id(self)} if seen is None else seen
for k,v in self.__call_items(self._map):
# circular assignment case
if isinstance(v, self.__class__):
if id(v) in seen:
items.append('{0}={1}(...)'.format(k, self.__class__.__name__))
else:
seen.add(id(v))
items.append('{0}={1}'.format(k, v.__str__(seen)))
else:
items.append('{0}={1}'.format(k, repr(v)))
joined = ', '.join(items)
out = '{0}({1})'.format(self.__class__.__name__, joined)
return out
def __repr__(self):
return str(self)
def toDict(self, seen = None):
if seen is None:
seen = {}
d = {}
seen[id(self)] = d
for k,v in self.items():
if issubclass(type(v), DotMap):
idv = id(v)
if idv in seen:
v = seen[idv]
else:
v = v.toDict(seen = seen)
elif type(v) in (list, tuple):
l = []
for i in v:
n = i
if issubclass(type(i), DotMap):
idv = id(n)
if idv in seen:
n = seen[idv]
else:
n = i.toDict(seen = seen)
l.append(n)
if type(v) is tuple:
v = tuple(l)
else:
v = l
d[k] = v
return d
def pprint(self, pformat='dict'):
if pformat == 'json':
print(dumps(self.toDict(), indent=4, sort_keys=True))
else:
pprint(self.toDict())
def empty(self):
return (not any(self))
# proper dict subclassing
def values(self):
return self._map.values()
# ipython support
def __dir__(self):
return self.keys()
@classmethod
def parseOther(self, other):
if issubclass(type(other), DotMap):
return other._map
else:
return other
def __cmp__(self, other):
other = DotMap.parseOther(other)
return self._map.__cmp__(other)
def __eq__(self, other):
other = DotMap.parseOther(other)
if not isinstance(other, dict):
return False
return self._map.__eq__(other)
def __ge__(self, other):
other = DotMap.parseOther(other)
return self._map.__ge__(other)
def __gt__(self, other):
other = DotMap.parseOther(other)
return self._map.__gt__(other)
def __le__(self, other):
other = DotMap.parseOther(other)
return self._map.__le__(other)
def __lt__(self, other):
other = DotMap.parseOther(other)
return self._map.__lt__(other)
def __ne__(self, other):
other = DotMap.parseOther(other)
return self._map.__ne__(other)
def __delitem__(self, key):
return self._map.__delitem__(key)
def __len__(self):
return self._map.__len__()
def clear(self):
self._map.clear()
def copy(self):
return self.__class__(self)
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo=None):
return self.copy()
def get(self, key, default=None):
return self._map.get(key, default)
def has_key(self, key):
return key in self._map
def iterkeys(self):
return self._map.iterkeys()
def itervalues(self):
return self._map.itervalues()
def keys(self):
return self._map.keys()
def pop(self, key, default=None):
return self._map.pop(key, default)
def popitem(self):
return self._map.popitem()
def setdefault(self, key, default=None):
return self._map.setdefault(key, default)
def update(self, *args, **kwargs):
if len(args) != 0:
self._map.update(*args)
self._map.update(kwargs)
def viewitems(self):
return self._map.viewitems()
def viewkeys(self):
return self._map.viewkeys()
def viewvalues(self):
return self._map.viewvalues()
@classmethod
def fromkeys(cls, seq, value=None):
d = cls()
d._map = OrderedDict.fromkeys(seq, value)
return d
def __getstate__(self): return self.__dict__
def __setstate__(self, d): self.__dict__.update(d)
# bannerStr
def _getListStr(self,items):
out = '['
mid = ''
for i in items:
mid += ' {}\n'.format(i)
if mid != '':
mid = '\n' + mid
out += mid
out += ']'
return out
def _getValueStr(self,k,v):
outV = v
multiLine = len(str(v).split('\n')) > 1
if multiLine:
# push to next line
outV = '\n' + v
if type(v) is list:
outV = self._getListStr(v)
out = '{} {}'.format(k,outV)
return out
def _getSubMapDotList(self, pre, name, subMap):
outList = []
if pre == '':
pre = name
else:
pre = '{}.{}'.format(pre,name)
def stamp(pre,k,v):
valStr = self._getValueStr(k,v)
return '{}.{}'.format(pre, valStr)
for k,v in subMap.items():
if isinstance(v,DotMap) and v != DotMap():
subList = self._getSubMapDotList(pre,k,v)
outList.extend(subList)
else:
outList.append(stamp(pre,k,v))
return outList
def _getSubMapStr(self, name, subMap):
outList = ['== {} =='.format(name)]
for k,v in subMap.items():
if isinstance(v, self.__class__) and v != self.__class__():
# break down to dots
subList = self._getSubMapDotList('',k,v)
# add the divit
# subList = ['> {}'.format(i) for i in subList]
outList.extend(subList)
else:
out = self._getValueStr(k,v)
# out = '> {}'.format(out)
out = '{}'.format(out)
outList.append(out)
finalOut = '\n'.join(outList)
return finalOut
def bannerStr(self):
lines = []
previous = None
for k,v in self.items():
if previous == self.__class__.__name__:
lines.append('-')
out = ''
if isinstance(v, self.__class__):
name = k
subMap = v
out = self._getSubMapStr(name,subMap)
lines.append(out)
previous = self.__class__.__name__
else:
out = self._getValueStr(k,v)
lines.append(out)
previous = 'other'
lines.append('--')
s = '\n'.join(lines)
return s
reserved_keys = {i for i in dir(DotMap) if not i.startswith('__') and not i.endswith('__')}
if __name__ == '__main__':
# basics
print('\n== basics ==')
d = {
'a':1,
'b':2,
'subD': {'c':3, 'd':4}
}
dd = DotMap(d)
print(dd)
print(len(dd))
print(dd.copy())
print(dd)
print(OrderedDict.fromkeys([1,2,3]))
print(DotMap.fromkeys([1,2,3], 'a'))
print(dd.get('a'))
print(dd.get('f',33))
print(dd.get('f'))
print(dd.has_key('a'))
dd.update([('rat',5),('bum',4)], dog=7,cat=9)
dd.update({'lol':1,'ba':2})
print(dd)
print
for k in dd:
print(k)
print('a' in dd)
print('c' in dd)
dd.c.a = 1
print(dd.toDict())
dd.pprint()
print
print(dd.values())
dm = DotMap(name='Steve', job='programmer')
print(dm)
print(issubclass(dm.__class__, dict))
am = DotMap()
am.some.deep.path.cuz.we = 'can'
print(am)
del am.some.deep
print(am)
parentDict = {
'name': 'Father1',
'children': [
{'name': 'Child1'},
{'name': 'Child2'},
{'name': 'Child3'},
]
}
parent = DotMap(parentDict)
print([x.name for x in parent.children])
# pickle
print('\n== pickle ==')
import pickle
s = pickle.dumps(parent)
d = pickle.loads(s)
print(d)
# init from DotMap
print('\n== init from DotMap ==')
e = DotMap(d)
print(e)
# empty
print('\n== empty() ==')
d = DotMap()
print(d.empty())
d.a = 1
print(d.empty())
print()
x = DotMap({'a': 'b'})
print(x.b.empty()) # True (and creates empty DotMap)
print(x.b) # DotMap()
print(x.b.empty()) # also True
# _dynamic
print('\n== _dynamic ==')
d = DotMap()
d.still.works
print(d)
d = DotMap(_dynamic=False)
try:
d.no.creation
print(d)
except AttributeError:
print('AttributeError caught')
d = {'sub':{'a':1}}
dm = DotMap(d)
print(dm)
dm.still.works
dm.sub.still.works
print(dm)
dm2 = DotMap(d,_dynamic=False)
try:
dm.sub.yes.creation
print(dm)
dm2.sub.no.creation
print(dm)
except AttributeError:
print('AttributeError caught')
# _dynamic
print('\n== toDict() ==')
conf = DotMap()
conf.dep = DotMap(facts=DotMap(operating_systems=DotMap(os_CentOS_7=True), virtual_data_centers=[DotMap(name='vdc1', members=['sp1'], options=DotMap(secret_key='badsecret', description='My First VDC')), DotMap(name='vdc2', members=['sp2'], options=DotMap(secret_key='badsecret', description='My Second VDC'))], install_node='192.168.2.200', replication_group_defaults=DotMap(full_replication=False, enable_rebalancing=False, description='Default replication group description', allow_all_namespaces=False), node_defaults=DotMap(ntp_servers=['192.168.2.2'], ecs_root_user='root', dns_servers=['192.168.2.2'], dns_domain='local', ecs_root_pass='badpassword'), storage_pools=[DotMap(name='sp1', members=['192.168.2.220'], options=DotMap(ecs_block_devices=['/dev/vdb'], description='My First SP')), DotMap(name='sp2', members=['192.168.2.221'], options=DotMap(protected=False, ecs_block_devices=['/dev/vdb'], description='My Second SP'))], storage_pool_defaults=DotMap(cold_storage_enabled=False, protected=False, ecs_block_devices=['/dev/vdc'], description='Default storage pool description'), virtual_data_center_defaults=DotMap(secret_key='badsecret', description='Default virtual data center description'), management_clients=['192.168.2.0/24'], replication_groups=[DotMap(name='rg1', members=['vdc1', 'vdc2'], options=DotMap(description='My RG'))]), lawyers=DotMap(license_accepted=True))
print(conf.dep.toDict()['facts']['replication_groups'])
# recursive assignment
print('\n== recursive assignment ==')
# dict
d = dict()
d['a'] = 5
print(id(d))
d['recursive'] = d
print(d)
print(d['recursive']['recursive']['recursive'])
# DotMap
m = DotMap()
m.a = 5
print(id(m))
m.recursive = m
print(m.recursive.recursive.recursive)
print(m)
print(m.toDict())
# kwarg
print('\n== kwarg ==')
def test(**kwargs):
print(kwargs)
class D:
def keys(self):
return ['a', 'b']
def __getitem__(self, key):
return 0
a = {'1':'a', '2':'b'}
b = DotMap(a, _dynamic=False)
o = OrderedDict(a)
test(**a)
test(**b.toDict())
test(**o)
test(**D())
# ordering
print('\n== ordering ==')
m = DotMap()
m.alpha = 1
m.bravo = 2
m.charlie = 3
m.delta = 4
for k,v in m.items():
print(k,v)
# subclassing
print('\n== subclassing ==')
d = DotMap()
o = OrderedDict()
print(isinstance(d, dict))
print(isinstance(o, dict))
e = DotMap(m)
print(e)
# deepcopy
print('\n== deepcopy ==')
import copy
t = DotMap()
t.a = 1
t.b = 3
f = copy.deepcopy(t)
t.a = 2
print(t)
print(f)
# copy order preservation
print('\n== copy order preservation ==')
t = DotMap()
t.a = 1
t.b = 2
t.c = 3
copies = []
print(id(t))
for i in range(3):
copyMap = copy.deepcopy(t)
copies.append(copyMap)
print(id(copyMap))
print()
for copyMap in copies:
for k,v in copyMap.items():
print(k,v)
print()
# bannerStr
print('\n== bannerStr ==')
t.cities.LA = 1
t.cities.DC = 2
t.cities.London.pop = 'many'
t.cities.London.weather = 'rain'
haiku = '\n'.join([
"Haikus are easy",
"But sometimes they don't make sense",
"Refrigerator",
])
t.haiku = haiku
t.teams.blue = 1
t.teams.red = 2
t.teams.green = 3
t.colors.blue = 1
t.colors.red = 2
t.colors.green = 3
t.numbers.short = list(range(4))
t.numbers.early = list(range(10))
t.numbers.backwards = list(range(10,-1,-1))
t.deepLog.deeper.Q = list(range(4))
print(t.bannerStr())
# sub-DotMap deepcopy
print('\n== sub-DotMap deepcopy ==')
import copy
l = []
d = {'d1': {'d2': ''}}
m = DotMap(d)
for i in range(3):
x = copy.deepcopy(m)
x.d1.d2 = i
l.append(x)
for m in l:
print(m)
# tuple toDict
print('\n== DotMap tuple toDict ==')
m = DotMap({'a': 1, 'b': (11, 22, DotMap({'c': 3}))})
d = m.toDict()
print(d)
# unpacking tests
'''
print('\n== Unpacking ==')
d = {'a':1}
print({**d})
m = DotMap(a=1)
print({**m.toDict()})
m = DotMap(a=1)
print({**m})
'''
print('\n== DotMap subclass ==')
class MyDotMap(DotMap):
def __getitem__(self, k):
return super(MyDotMap, self).__getitem__(k)
my = MyDotMap()
my.x.y.z = 3
print(my)
# subclass with existing property
class PropertyDotMap(MyDotMap):
def __init__(self, *args, **kwargs):
super(MyDotMap, self).__init__(*args, **kwargs)
self._myprop = MyDotMap({'nested': 123})
@property
def first(self):
return self._myprop
p = PropertyDotMap()
print(p.first)
print(p.first.nested)
p.first.second.third = 456
print(p.first.second.third)
print('\n== DotMap method masking ==')
# method masking tests
d = DotMap(a=1,get='mango')
d = DotMap((('a',1),('get','mango')))
d = DotMap({'a':1, 'get': 'mango'})
d = DotMap({'a':1, 'b': {'get': 'mango'}})
d.a = {'get':'mongo'}
try:
d = DotMap(a=1,get='mango', _prevent_method_masking = True)
raise RuntimeError("this should fail with KeyError")
except KeyError:
print('kwargs method masking ok')
try:
d = DotMap((('a',1),('get','mango')), _prevent_method_masking = True)
raise RuntimeError("this should fail with KeyError")
except KeyError:
print('iterable method masking ok')
try:
d = DotMap({'a':1, 'get': 'mango'}, _prevent_method_masking = True)
raise RuntimeError("this should fail with KeyError")
except KeyError:
print('dict method masking ok')
try:
d = DotMap({'a':1, 'b': {'get': 'mango'}}, _prevent_method_masking = True)
raise RuntimeError("this should fail with KeyError")
except KeyError:
print('nested dict method masking ok')
try:
d = DotMap({'a':1, 'b': {}}, _prevent_method_masking = True)
d.b.get = 7
raise RuntimeError("this should fail with KeyError")
except KeyError:
print('nested dict attrib masking ok')
print('\n== DotMap __init__, toDict, and __str__ with circular references ==')
a = { 'name': 'a'}
b = { 'name': 'b'}
c = { 'name': 'c', 'list': []}
# Create circular reference
a['b'] = b
b['c'] = c
c['a'] = a
c['list'].append(b)
print(a)
x = DotMap(a)
print(x)
y = x.toDict()
assert id(y['b']['c']['a']) == id(y)
assert id(y['b']['c']['list'][0]) == id(y['b'])
print(y)
# final print
print()
|
2dd88885bde8cd062ea9de97ba2f860d77946641
|
a381debad5dbcbfa83b549438ea6294c9e5a122e
|
/examples/drums.py
|
cc40c1503edefbe8ee3a81a3b777053c0ec2b9b6
|
[
"MIT"
] |
permissive
|
pimoroni/explorer-hat
|
6092ac8ab567a526ed27d7307883f26a6a8015ac
|
b65417f503d35bb74a2be70bfa72940c561f865f
|
refs/heads/master
| 2022-05-01T16:42:00.028028
| 2022-03-16T10:25:30
| 2022-03-16T10:25:30
| 29,976,470
| 165
| 77
|
MIT
| 2021-11-09T13:00:34
| 2015-01-28T16:25:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
drums.py
|
#!/usr/bin/env python
import signal
from sys import exit
try:
import pygame
except ImportError:
exit("This script requires the pygame module\nInstall with: sudo pip install pygame")
import explorerhat
print("""
This example turns your Explorer HAT into a drum kit!
Hit any touch pad to hear a drum sound.
Press CTRL+C to exit.
""")
LEDS = [4, 17, 27, 5]
samples = [
'sounds/hit.wav',
'sounds/thud.wav',
'sounds/clap.wav',
'sounds/crash.wav',
'sounds/hat.wav',
'sounds/smash.wav',
'sounds/rim.wav',
'sounds/ting.wav'
]
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.mixer.set_num_channels(16)
sounds = []
for x in range(8):
sounds.append(pygame.mixer.Sound(samples[x]))
def handle(ch, evt):
if ch > 4:
led = ch - 5
else:
led = ch - 1
if evt == 'press':
explorerhat.light[led].fade(0, 100, 0.1)
sounds[ch - 1].play(loops=0)
name = samples[ch - 1].replace('sounds/','').replace('.wav','')
print("{}!".format(name.capitalize()))
else:
explorerhat.light[led].off()
explorerhat.touch.pressed(handle)
explorerhat.touch.released(handle)
signal.pause()
|
7b584639696762a5dc3f7d609287c99ac8ba2c48
|
069c2295076c482afadfe6351da5ae02be8e18e6
|
/tests/forms_tests/field_tests/test_combofield.py
|
d433fdf2b3f789846a3c59bf37217578acdcb3b0
|
[
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Python-2.0.1",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] |
permissive
|
django/django
|
5eb557f57053631cd4f566f451e43197309dbeeb
|
c74a6fad5475495756a5bdb18b2cab2b68d429bc
|
refs/heads/main
| 2023-09-01T03:43:44.033530
| 2023-08-31T08:27:32
| 2023-08-31T08:27:32
| 4,164,482
| 73,530
| 38,187
|
BSD-3-Clause
| 2023-09-14T20:03:48
| 2012-04-28T02:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
test_combofield.py
|
from django.core.exceptions import ValidationError
from django.forms import CharField, ComboField, EmailField
from django.test import SimpleTestCase
class ComboFieldTest(SimpleTestCase):
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual("test@example.com", f.clean("test@example.com"))
with self.assertRaisesMessage(
ValidationError,
"'Ensure this value has at most 20 characters (it has 28).'",
):
f.clean("longemailaddress@example.com")
with self.assertRaisesMessage(
ValidationError, "'Enter a valid email address.'"
):
f.clean("not an email")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual("test@example.com", f.clean("test@example.com"))
with self.assertRaisesMessage(
ValidationError,
"'Ensure this value has at most 20 characters (it has 28).'",
):
f.clean("longemailaddress@example.com")
with self.assertRaisesMessage(
ValidationError, "'Enter a valid email address.'"
):
f.clean("not an email")
self.assertEqual("", f.clean(""))
self.assertEqual("", f.clean(None))
|
66edef2ed3ab76d28d331e964136a06caf1c59d2
|
90d02fee4d02962c9e3d03314cd1597c70bf2f8c
|
/asdf/__init__.py
|
92bbcf0fce2fff803ebd8e41f3793f8279f058fb
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
asdf-format/asdf
|
08e19f5d603c738b0ae94ccd1a339ff6b8cf4209
|
a5b2b2d94f2fc71746f896c6d322439a27dd0bdd
|
refs/heads/main
| 2023-08-17T17:06:20.828932
| 2023-08-08T10:53:27
| 2023-08-08T10:53:27
| 18,112,754
| 328
| 25
|
BSD-3-Clause
| 2023-09-13T15:57:22
| 2014-03-25T19:00:43
|
Python
|
UTF-8
|
Python
| false
| false
| 653
|
py
|
__init__.py
|
"""
asdf: Python library for reading and writing Advanced Scientific
Data Format (ASDF) files
"""
__all__ = [
"AsdfFile",
"Stream",
"open",
"IntegerType",
"ExternalArrayReference",
"info",
"__version__",
"ValidationError",
"get_config",
"config_context",
]
from ._convenience import info
from ._version import version as __version__
from .asdf import AsdfFile
from .asdf import open_asdf as open
from .config import config_context, get_config
from .exceptions import ValidationError
from .stream import Stream
from .tags.core import IntegerType
from .tags.core.external_reference import ExternalArrayReference
|
f1ad7c4a1624a4874ebb29e2dfce82469f6cfe5e
|
c703b8ac3b5545857f6c95efa2d61eaf7a664021
|
/setup.py
|
e6737a062b1e82df9c0638549c27794fe234147a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
iPERDance/iPERCore
|
d29681d229b3098b3517b1abf4f7ea65f579de73
|
fcf9a18ffd66bf3fdd3eea4153a3bc4785131848
|
refs/heads/main
| 2023-07-30T15:04:15.835396
| 2023-04-12T14:21:23
| 2023-04-12T14:21:23
| 313,664,064
| 2,520
| 339
|
Apache-2.0
| 2023-05-12T03:26:52
| 2020-11-17T15:36:25
|
Python
|
UTF-8
|
Python
| false
| false
| 15,077
|
py
|
setup.py
|
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import os
from os.path import exists
import platform
from setuptools import find_packages, setup
import subprocess
import sys
import re
TORCH_DIST = "https://download.pytorch.org/whl/torch_stable.html"
MMCV_DIST = "https://download.openmmlab.com/mmcv/dist"
PIP_VERSION = "20.2.4"
# cuda
# torch 1.8.1 +cu101 +cu102 +cu111
# torch 1.8.0 +cu101 cu102 +cu111
# torch 1.7.1 +cu101 cu102 +cu110
# torch 1.7.0 +cu101 cu102 +cu110
# torch 1.6.0 +cu101 cu102
VALID_CUDA = [10.1, 10.2, 11.0, 11.1]
DEFAULT_LINUX_CUDA_TORCH = {
"cu111": "1.8.1",
"cu110": "1.7.0",
"cu102": "1.7.0",
"cu101": "1.7.0"
}
DEFAULT_WINDOWS_CUDA_TORCH = {
"cu102": "1.6.0",
"cu101": "1.6.0"
}
PRECOMPILED_TORCH_CUDA_PAIRS = {
"1.8.1+cu111": {
"torch": "1.8.1+cu111",
"torchvision": "0.9.1+cu111",
"mmcv-full": "1.2.0",
"mmcv-dist": f"{MMCV_DIST}/cu111/torch1.8.1/index.html"
},
"1.8.1+cu102": {
"torch": "1.8.1+cu102",
"torchvision": "0.9.1+cu102",
"mmcv-full": "1.2.0",
"mmcv-dist": f"{MMCV_DIST}/cu102/torch1.8.1/index.html"
},
"1.8.1+cu101": {
"torch": "1.8.1+cu101",
"torchvision": "0.9.1+cu101",
"mmcv-full": "1.2.0",
"mmcv-dist": f"{MMCV_DIST}/cu101/torch1.8.1/index.html"
},
"1.7.0+cu110": {
"torch": "1.7.0+cu110",
"torchvision": "0.8.1+cu110",
"mmcv-full": "1.2.0",
"mmcv-dist": f"{MMCV_DIST}/cu110/torch1.7.0/index.html"
# "mmcv-full": "1.2.0+torch1.7.0+cu110"
},
"1.7.0+cu102": {
"torch": "1.7.0",
"torchvision": "0.8.1",
"mmcv-full": "1.2.0",
"mmcv-dist": f"{MMCV_DIST}/cu102/torch1.7.0/index.html"
# "mmcv-full": "1.2.0+torch1.7.0+cu102"
},
"1.7.0+cu101": {
"torch": "1.7.0+cu101",
"torchvision": "0.8.1+cu101",
"mmcv-full": "1.2.0",
"mmcv-dist": f"{MMCV_DIST}/cu101/torch1.7.0/index.html"
# "mmcv-full": "1.2.0+torch1.7.0+cu101"
},
"1.6.0+cu102": {
"torch": "1.6.0",
"torchvision": "0.7.0",
"mmcv-full": "1.2.0",
"mmcv-dist": f"{MMCV_DIST}/cu102/torch1.6.0/index.html"
# "mmcv-full": "1.1.5+torch1.6.0+cu102"
},
"1.6.0+cu101": {
"torch": "1.6.0+cu101",
"torchvision": "0.7.0+cu101",
"mmcv-full": "1.2.0",
"mmcv-dist": f"{MMCV_DIST}/cu101/torch1.6.0/index.html"
# "mmcv-full": "1.1.5+torch1.6.0+cu101"
}
}
WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS = {
"1.6.0+cu102": {
"torch": "https://download.pytorch.org/whl/cu102/torch-1.6.0-cp{PYTHON_VERSION}-cp{PYTHON_ABI_VERSION}-win_amd64.whl",
"torchvision": "https://download.pytorch.org/whl/cu102/torchvision-0.7.0-cp{PYTHON_VERSION}-cp{PYTHON_ABI_VERSION}-win_amd64.whl",
"mmcv-full": "1.1.5",
"mmcv-dist": f"{MMCV_DIST}/cu102/torch1.6.0/index.html"
},
"1.6.0+cu101": {
"torch": "https://download.pytorch.org/whl/cu101/torch-1.6.0%2Bcu101-cp{PYTHON_VERSION}-cp{PYTHON_ABI_VERSION}-win_amd64.whl",
"torchvision": "https://download.pytorch.org/whl/cu101/torchvision-0.7.0%2Bcu101-cp{PYTHON_VERSION}-cp{PYTHON_ABI_VERSION}-win_amd64.whl",
"mmcv-full": "1.1.5",
"mmcv-dist": f"{MMCV_DIST}/cu101/torch1.6.0/index.html"
}
}
def get_cuda_version() -> float:
"""
    Get the CUDA version of the system; make sure that $CUDA_HOME or $CUDA_PATH has been added to the environment.
    The full CUDA version string is aaa.b.ccc; only the aaa.b part is returned.
Returns:
version (float): the cuda version.
"""
global VALID_CUDA
# In windows, the cuda_install.exe will set the `CUDA_PATH` to the system environmental variables.
if "CUDA_HOME" not in os.environ and "CUDA_PATH" in os.environ:
os.environ["CUDA_HOME"] = os.environ["CUDA_PATH"]
assert "CUDA_HOME" in os.environ, r"Cannot find the $CUDA_HOME in the environments. Please manually install the " \
r"CUDA >= 10.1, and set the $CUDA_HOME environment variable."
cuda_version_file = os.path.join(os.environ["CUDA_HOME"], "version.txt")
if os.path.exists(cuda_version_file):
# e.g. "CUDA Version 10.1.243", "CUDA Version 10.0.130"
with open(cuda_version_file) as f:
version_str = f.readline().replace("\n", "").replace("\r", "")
# "CUDA Version 10.1.243" -> ["CUDA", "Version", "10.1.243"] -> "10.1.243"
version = version_str.split(" ")[2]
# "10.1.243" -> "10.1" -> 10.1
version = float(".".join(version.split(".")[0:2]))
else:
# run `nvcc -V`
# """nvcc: NVIDIA (R) Cuda compiler driver
# Copyright (c) 2005-2019 NVIDIA Corporation
# Built on Sun_Jul_28_19:07:16_PDT_2019
# Cuda compilation tools, release 10.1, V10.1.243
# """
nvcc_out = subprocess.run("nvcc -V", shell=True, stdout=subprocess.PIPE)
nvcc_str = nvcc_out.stdout.decode("utf-8")
nvcc_cuda = re.findall(r"[.]*([\d]+.[\d]+),[.]*", nvcc_str)
if len(nvcc_cuda) == 0:
raise RuntimeError(f"nvcc -V error! {nvcc_str}")
else:
version = float(nvcc_cuda[0])
    assert version in VALID_CUDA, f"CUDA Version {version} must be in {VALID_CUDA}. " \
                                  f"Please manually install a CUDA version that meets the requirements."
print(f"Cuda version is {version}")
return version
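# Worked illustration of the `nvcc -V` fallback above, using the sample output
# quoted in the comments: the regex pulls out the "release" number, e.g.
#   re.findall(r"[.]*([\d]+.[\d]+),[.]*",
#              "Cuda compilation tools, release 10.1, V10.1.243")  # -> ["10.1"]
# and the result is then cast to the float 10.1.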
def get_python_version() -> str:
"""
Get the python version. The python version is aaa.b.c, and it only returns aaa.b
Returns:
version (str):
"""
version = str(platform.python_version())
version = "".join(version.split(".")[0:2])
assert "36" <= version <= "38", f"Currently, it only support the python version with 3.6.+, 3.7.+, 3.8.+"
return version
def get_torch_version(cuda_version_str, precompile_torch_cuda_paris, default_torch_cuda_mapper) -> str:
"""
Args:
cuda_version_str:
precompile_torch_cuda_paris (dict): PRECOMPILED_TORCH_CUDA_PAIRS or WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS
default_torch_cuda_mapper (dict): DEFAULT_LINUX_CUDA_TORCH or DEFAULT_WINDOWS_CUDA_TORCH
Returns:
torch_version (str):
"""
if "torch" in os.environ:
attempt_torch_version_str = os.environ["torch"]
torch_cuda_str = f"{attempt_torch_version_str}+{cuda_version_str}"
if torch_cuda_str not in precompile_torch_cuda_paris:
torch_version_str = default_torch_cuda_mapper[cuda_version_str]
print(f"torch=={attempt_torch_version_str} is incompatible with cuda {cuda_version_str}. "
f"The compatible torch-cuda version are {precompile_torch_cuda_paris.keys()},"
f"and here we install the torch+cuda=={torch_version_str} + {cuda_version_str}.")
else:
torch_version_str = attempt_torch_version_str
else:
torch_version_str = default_torch_cuda_mapper[cuda_version_str]
return torch_version_str
def platform_dependencies():
"""Parse the pre-complied consistent versions of torch, torchvision, mmcv, and CUDA.
The torch version must >= 1.6.0, and the CUDA version must >= 10.1.
Currently, it only supports Linux and Windows.
If the platform is Linux, we will use torch 1.7.0 + CUDA Version.
Otherwise if the platform is windows, we will use torch 1.6.0 + CUDA VERSION.
Returns:
List[List[str]]: list of setup requirements items.
"""
global TORCH_DIST, PRECOMPILED_TORCH_CUDA_PAIRS, WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS, \
DEFAULT_LINUX_CUDA_TORCH, DEFAULT_WINDOWS_CUDA_TORCH
cuda_version = get_cuda_version()
cuda_version_str = "cu" + str(cuda_version).replace(".", "")
packages = []
if platform.system().lower() == "windows":
python_version = get_python_version()
python_abi_version = python_version
if python_version != "38":
python_abi_version += "m"
torch = get_torch_version(cuda_version_str, WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS, DEFAULT_WINDOWS_CUDA_TORCH)
torch_cuda_version = f"{torch}+{cuda_version_str}"
numpy_version = "numpy==1.19.3"
assert torch_cuda_version in WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS, \
f"There is no pre-complied pytorch 1.6.0 with CUDA {cuda_version}, " \
f"and you might need to install pytorch 1.6.0 with CUDA {cuda_version} from source."
torch_link = WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS[torch_cuda_version]["torch"] \
.format(PYTHON_VERSION=python_version, PYTHON_ABI_VERSION=python_abi_version)
torchvision_link = WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS[torch_cuda_version]["torchvision"] \
.format(PYTHON_VERSION=python_version, PYTHON_ABI_VERSION=python_abi_version)
mmcv_version = WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS[torch_cuda_version]["mmcv-full"]
mmcv_dist = WINDOWS_PRECOMPILED_TORCH_CUDA_PAIRS[torch_cuda_version]["mmcv-dist"]
packages.append([f"{torch_link}", "-f", TORCH_DIST])
packages.append([f"{torchvision_link}", "-f", TORCH_DIST])
packages.append([f"mmcv-full=={mmcv_version}", "-f", mmcv_dist])
packages.append(numpy_version)
elif platform.system().lower() == "linux":
torch = get_torch_version(cuda_version_str, PRECOMPILED_TORCH_CUDA_PAIRS, DEFAULT_LINUX_CUDA_TORCH)
torch_cuda_version = f"{torch}+{cuda_version_str}"
numpy_version = "numpy>=1.19.3"
assert torch_cuda_version in PRECOMPILED_TORCH_CUDA_PAIRS, \
f"There is no pre-complied pytorch {torch} with CUDA {cuda_version} in {TORCH_DIST}, " \
f"and you might need to install pytorch {torch} with CUDA {cuda_version} from source."
torch_version = PRECOMPILED_TORCH_CUDA_PAIRS[torch_cuda_version]["torch"]
torchvision_version = PRECOMPILED_TORCH_CUDA_PAIRS[torch_cuda_version]["torchvision"]
mmcv_version = PRECOMPILED_TORCH_CUDA_PAIRS[torch_cuda_version]["mmcv-full"]
mmcv_dist = PRECOMPILED_TORCH_CUDA_PAIRS[torch_cuda_version]["mmcv-dist"]
packages.append([f"torch=={torch_version}", "-f", TORCH_DIST])
packages.append([f"torchvision=={torchvision_version}", "-f", TORCH_DIST])
packages.append([f"mmcv-full=={mmcv_version}", "-f", mmcv_dist])
packages.append(numpy_version)
else:
raise ValueError(f"Currently it only supports 'windows' and 'linux'.")
return packages
def parse_requirements(fname="requirements.txt", with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith("-r "):
# Allow specifying requirements in other files
target = line.split(" ")[1]
for info in parse_require_file(target):
yield info
else:
info = {"line": line}
if line.startswith("-e "):
info["package"] = line.split("#egg=")[1]
elif "@git+" in line:
info["package"] = line
else:
# Remove versioning from the package
pat = "(" + "|".join([">=", "==", ">"]) + ")"
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info["package"] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ";" in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(";"))
info["platform_deps"] = platform_deps
else:
version = rest # NOQA
info["version"] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, "r") as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith("#"):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info["package"]]
if with_version and "version" in info:
parts.extend(info["version"])
if not sys.version.startswith("3.4"):
# apparently package_deps are broken in 3.4
platform_deps = info.get("platform_deps")
if platform_deps is not None:
parts.append(";" + platform_deps)
item = "".join(parts)
yield item
packages = list(gen_packages_items())
return packages
# 1. install torch, torchvision, and mmcv firstly.
torch_torchvision_mmcv = platform_dependencies()
# 2. other installed requires
install_requires = parse_requirements("requirements/runtime.txt")
# 3. build requires
build_requires = parse_requirements("requirements/build.txt")
# 4. pip install all of them
all_requires = [[f"pip=={PIP_VERSION}"]] + torch_torchvision_mmcv + install_requires + build_requires
pip_executable = [sys.executable, "-m", "pip", "install"]
for package_line in all_requires:
if isinstance(package_line, str):
package_line = [package_line]
pip_install_line = pip_executable + package_line
print(" ".join(pip_install_line))
subprocess.run(pip_install_line)
# 5. setup iPERCore
setup(
name="iPERCore",
version="0.2.0",
author="Wen Liu, and Zhixin Piao",
author_email="liuwen@shanghaitech.edu.cn",
url="https://github.com/iPERDance/iPERCore",
description="The core of impersonator++.",
packages=find_packages(exclude=("assets",)),
python_requires=">=3.6, <=3.8",
install_requires=install_requires,
entry_points={
"console_scripts": [
"run_imitator = iPERCore.services.run_imitator:run_imitator",
"run_swapper = iPERCore.services.run_imitator:run_swapper",
"run_viewer = iPERCore.services.run_imitator:run_viewer",
]
}
)
|
f73ffa688c47f903e316ab712822adf1dcb8af34
|
572afc77a246acb9483b47fc9e1839f47005d736
|
/python/federatedml/model_selection/start_cross_validation.py
|
94a5ae608671fcc68b88c88e709e84f549d00091
|
[
"Apache-2.0"
] |
permissive
|
FederatedAI/FATE
|
7c787c308cca9ff46f287d24569c68de0a1cac07
|
8767db5ec0cb93784f64b290bc39b7b545c530fb
|
refs/heads/master
| 2023-08-17T10:13:00.302529
| 2023-06-14T07:01:38
| 2023-06-14T07:01:38
| 167,349,656
| 4,942
| 1,571
|
Apache-2.0
| 2023-09-14T07:02:29
| 2019-01-24T10:32:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
start_cross_validation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.model_selection.k_fold import KFold
from federatedml.util import LOGGER
def _get_cv_param(model):
model.model_param.cv_param.role = model.role
model.model_param.cv_param.mode = model.mode
return model.model_param.cv_param
def run(model, data_instances, host_do_evaluate=False):
if not model.need_run:
return data_instances
    kfold_obj = KFold()
    cv_param = _get_cv_param(model)
    output_data = kfold_obj.run(cv_param, data_instances, model, host_do_evaluate)
LOGGER.info("Finish KFold run")
return output_data
|
a2c1dc15e3f4fec16c059dedac625866a956e52e
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/Accessdata/Scripts/AccessdataCheckProcessExistsInSnapshot/AccessdataCheckProcessExistsInSnapshot_test.py
|
bb73e96b947aaf4e42819715c34f93fc6343c704
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
AccessdataCheckProcessExistsInSnapshot_test.py
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def test_main_flow_process_exist(mocker):
"""
Given:
- a process ID that exists.
When:
- executing the main flow.
Then:
        - Make sure that an entry is returned to the war-room, meaning that the process exists
"""
mocker.patch.object(demisto, 'args', return_value={'filepath': 'path', 'process': 'process-1'})
from AccessdataCheckProcessExistsInSnapshot import main
mocked_data = '<?xml version="1.0" encoding="UTF-8" ?><root><Process><Name>process-1<' \
'/Name></Process><Process><Name>process-2</Name></Process></root>'
mocker.patch.object(demisto, 'executeCommand')
mocker.patch.object(demisto, 'get', return_value=mocked_data)
demisto_results_mocker = mocker.patch.object(demisto, 'results')
main()
assert demisto_results_mocker.called
assert demisto_results_mocker.call_args.args[0] == {
'Type': 1,
'ContentsFormat': 'json',
'Contents': {'Name': 'process-1', 'Exists': 'Yes'}, 'HumanReadable': 'Process "process-1" exists: Yes',
'EntryContext': {'Accessdata.Process(val && val.Name == obj.Name)': {'Name': 'process-1', 'Exists': 'Yes'}}
}
def test_main_flow_process_does_not_exist(mocker):
"""
Given:
- a process ID that does not exist.
When:
- executing the main flow.
Then:
- Make sure that an entry is returned to the war-room meaning that the process does not exist
"""
mocker.patch.object(demisto, 'args', return_value={'filepath': 'path', 'process': 'process-3'})
from AccessdataCheckProcessExistsInSnapshot import main
mocked_data = '<?xml version="1.0" encoding="UTF-8" ?><root><Process><Name>process-1<' \
'/Name></Process><Process><Name>process-2</Name></Process></root>'
mocker.patch.object(demisto, 'executeCommand')
mocker.patch.object(demisto, 'get', return_value=mocked_data)
demisto_results_mocker = mocker.patch.object(demisto, 'results')
main()
assert demisto_results_mocker.called
assert demisto_results_mocker.call_args.args[0] == {
'Type': 1, 'ContentsFormat': 'json', 'Contents': {'Name': 'process-3', 'Exists': 'No'},
'HumanReadable': 'Process "process-3" exists: No',
'EntryContext': {'Accessdata.Process(val && val.Name == obj.Name)': {'Name': 'process-3', 'Exists': 'No'}}
}
|
679a7320b1c92e696db1ad7954f0fde0e41d8f43
|
d01fa1b6668c66236405b799e39e529d1492af7c
|
/tests/base.py
|
b5cac99461884fd4652fd00de39b816e60c8f302
|
[
"MIT"
] |
permissive
|
chrisdev/wagtail-cookiecutter-foundation
|
426ffd974aa08ab10e4b0e44d5003476c597f2e4
|
e7d56ee01eb5976588129d7bd4d5fc6dab2d794a
|
refs/heads/master
| 2023-08-31T06:05:43.999253
| 2022-03-31T18:44:37
| 2022-03-31T18:44:37
| 33,870,540
| 189
| 72
|
MIT
| 2023-09-14T03:30:34
| 2015-04-13T13:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
base.py
|
import os
import re
import shutil
import unittest
from os.path import exists, dirname, join
import sh
from cookiecutter.main import cookiecutter
class DjangoCookieTestCase(unittest.TestCase):
root_dir = dirname(dirname(__file__))
ctx = {}
destpath = None
def check_paths(self, paths):
"""
Method to check all paths have correct substitutions,
used by other tests cases
"""
# Construct the cookiecutter search pattern
pattern = r'{{(\s?cookiecutter)[.](.*?)}}'
re_obj = re.compile(pattern)
# Assert that no match is found in any of the files
for path in paths:
for line in open(path, 'r'):
match = re_obj.search(line)
self.assertIsNone(
match,
"cookiecutter variable not replaced in {}".format(path))
def generate_project(self, extra_context=None):
ctx = {
"project_name": "Wagtail Project",
"project_slug": "wagtail_project",
"version_control_system": "hg",
"author_name": "Your Name",
"email": "Your email",
"description": "A short description of the project.",
"timezone": "UTC",
"now": "2015/04/16",
"year": "2015",
"production_host_name": "example.org",
"use_ssl_in_production": "true",
"staging_host_name": "staging.example.org",
}
if extra_context:
assert isinstance(extra_context, dict)
ctx.update(extra_context)
self.ctx = ctx
self.destpath = join(self.root_dir, self.ctx['project_slug'])
cookiecutter(
template='./', checkout=None, no_input=True, extra_context=ctx
)
# Build a list containing absolute paths to the generated files
paths = [os.path.join(dirpath, file_path)
for dirpath, subdirs, files in os.walk(self.destpath)
for file_path in files]
return paths
def clean(self):
if exists(self.destpath):
shutil.rmtree(self.destpath)
sh.cd(self.root_dir)
def tearDown(self):
self.clean()
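# Hedged illustration (not part of the original module): concrete test cases
# are expected to drive generate_project()/check_paths() roughly as below.
# The overridden context value ("git") is an assumption chosen for the example.
#
#     class VersionControlTests(DjangoCookieTestCase):
#         def test_git_layout(self):
#             paths = self.generate_project({"version_control_system": "git"})
#             self.check_paths(paths)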
|
bf0bdb886ec78cc67cc844ee859e5f72b77ba65c
|
e5aff0646237acf3639ac805652143cd8267bf33
|
/BlenderToolBox/setMat_ceramic.py
|
9de6d3ccd5c00bbce65c6be1f924fe4e11873e21
|
[
"Apache-2.0"
] |
permissive
|
HTDerekLiu/BlenderToolbox
|
42943cf9fee7277d319ff5baffe7810c4c27dfe4
|
8044e77268ff018514ad1501c291f6deb6a07ec6
|
refs/heads/master
| 2023-07-20T05:14:58.736225
| 2023-07-10T16:33:17
| 2023-07-10T16:33:17
| 162,408,776
| 408
| 48
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,908
|
py
|
setMat_ceramic.py
|
# Copyright 2020 Hsueh-Ti Derek Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from . initColorNode import initColorNode
def setMat_ceramic(mesh, meshC, subC):
mat = bpy.data.materials.new('MeshMaterial')
mesh.data.materials.append(mat)
mesh.active_material = mat
mat.use_nodes = True
tree = mat.node_tree
# init color node
C1Node = initColorNode(tree, meshC)
C2Node = initColorNode(tree, subC, [200, 400], [200, 200])
DIF = tree.nodes.new('ShaderNodeBsdfDiffuse')
tree.links.new(C1Node.outputs['Color'], DIF.inputs['Color'])
SUB1 = tree.nodes.new('ShaderNodeSubsurfaceScattering')
SUB1.inputs['Scale'].default_value = 0.3
tree.links.new(C2Node.outputs['Color'], SUB1.inputs['Color'])
MIX2 = tree.nodes.new('ShaderNodeMixShader')
MIX2.inputs['Fac'].default_value = 0.35
tree.links.new(DIF.outputs['BSDF'], MIX2.inputs[1])
tree.links.new(SUB1.outputs[0], MIX2.inputs[2])
LW = tree.nodes.new('ShaderNodeLayerWeight')
LW.inputs['Blend'].default_value = 0.35
GLO = tree.nodes.new('ShaderNodeBsdfGlossy')
MIX3 = tree.nodes.new('ShaderNodeMixShader')
tree.links.new(LW.outputs[0], MIX3.inputs['Fac'])
tree.links.new(MIX2.outputs[0], MIX3.inputs[1])
tree.links.new(GLO.outputs['BSDF'], MIX3.inputs[2])
tree.links.new(MIX3.outputs[0], tree.nodes['Material Output'].inputs['Surface'])
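# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The exact color
# container expected by initColorNode is assumed here to be a plain RGBA
# tuple; adapt it to the color object your BlenderToolbox version uses.
#
#     mesh = bpy.context.active_object            # any mesh in the scene
#     base_rgba = (0.9, 0.9, 0.85, 1.0)           # assumed RGBA format
#     subsurface_rgba = (1.0, 0.6, 0.5, 1.0)      # assumed RGBA format
#     setMat_ceramic(mesh, base_rgba, subsurface_rgba)
# ---------------------------------------------------------------------------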
|
1ca1ee0107dd7c94f13aaf6c9c63de584888ac43
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/sa/profiles/Huawei/MA5300/profile.py
|
25699e5b7ca7e45ef260c36cf43e65c3d717eeca
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,236
|
py
|
profile.py
|
# ---------------------------------------------------------------------
# Vendor: Huawei
# OS: MA5300
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "Huawei.MA5300"
pattern_more = [
(rb"--- More:", b" "),
(rb"[ ]+---- More \(Press CTRL\+C break\) ---[ ]+", b" "), # [ ]+ use for save \n in output
# stream, because more pattern remove from stream
(rb"Note: Terminal", b"\n"),
(rb"Warning: Battery is low power!", b"\n"),
(rb"\{\s<cr>.*\s\}:", b"\n"),
(rb"^Are you sure?\[Y/N\]", b"y\n"),
(rb"^\{ terminal\<K\> \}\:", b"terminal\n"),
(rb"\{ <cr>\|interface<K> \}\:", b"\n"),
]
pattern_username = rb"^Username:"
pattern_password = rb"^Password:"
command_exit = "logout"
command_super = b"enable"
command_enter_config = "configure terminal"
command_leave_config = "end"
command_save_config = "save"
enable_cli_session = False # With False mixin commands output over script
pattern_prompt = rb"(?P<hostname>\S+)(?:\(.*)?#"
pattern_unprivileged_prompt = rb"^(?P<hostname>[a-zA-Z0-9-_\.\/()]+)(?:-[a-zA-Z0-9/]+)*>$"
pattern_syntax_error = (
rb"(% Unknown command, the error locates at \'^\'| Logged Fail!|"
rb"System is busy, please try after a while)"
)
rogue_chars = [
re.compile(rb"\x1b\[39D\s+\x1b\[39D"),
re.compile(rb"\n\r\s+Line \d+ operating, attempt of the Line -\d+ denied!\n\r"),
re.compile(rb"\r\n\s+Note: Terminal users login \(IP: \S+ \)"),
re.compile(rb"\r\nWarning: Battery is low power!"),
b"\r",
]
# to one SNMP GET request
snmp_metrics_get_chunk = 30
# Timeout for snmp GET request
snmp_metrics_get_timeout = 5
# to one SNMP GET request for get_interface_status_ex
snmp_ifstatus_get_chunk = 30
# Timeout for snmp GET request for get_interface_status_ex
snmp_ifstatus_get_timeout = 3
_IF_TYPES = {
"aux": "other",
"loo": "loopback",
"m-e": "management",
"nul": "null",
"vla": "SVI",
}
@classmethod
def get_interface_type(cls, name):
return cls._IF_TYPES.get(name[:3].lower(), "unknown")
def get_interface_snmp_index(self, name):
return None
# def setup_session(self, script):
# script.cli("terminal type vt100", ignore_errors=True)
# script.cli("config", ignore_errors=True)
# script.cli("line vty 0 3", ignore_errors=True)
# script.cli("history size 0", ignore_errors=True)
# script.cli("length 0", ignore_errors=True)
# script.cli("exit", ignore_errors=True)
# script.cli("cls", ignore_errors=True)
# def shutdown_session(self, script):
# script.cli("config", ignore_errors=True)
# script.cli("line vty 0 3", ignore_errors=True)
# script.cli("no length 0", ignore_errors=True)
# script.cli("exit", ignore_errors=True)
|
ff4db2bae209a5a675da2b01b44937a36e829c0b
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/UserInfoAndBenefitQueryResult.py
|
ab9789977ea9f20388900f6a9fa8b1c495227bc2
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,569
|
py
|
UserInfoAndBenefitQueryResult.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BenefitGradePoint import BenefitGradePoint
class UserInfoAndBenefitQueryResult(object):
def __init__(self):
self._balance = None
self._benefit_info_list = None
self._grade = None
@property
def balance(self):
return self._balance
@balance.setter
def balance(self, value):
self._balance = value
@property
def benefit_info_list(self):
return self._benefit_info_list
@benefit_info_list.setter
def benefit_info_list(self, value):
if isinstance(value, list):
self._benefit_info_list = list()
for i in value:
if isinstance(i, BenefitGradePoint):
self._benefit_info_list.append(i)
else:
self._benefit_info_list.append(BenefitGradePoint.from_alipay_dict(i))
@property
def grade(self):
return self._grade
@grade.setter
def grade(self, value):
self._grade = value
def to_alipay_dict(self):
params = dict()
if self.balance:
if hasattr(self.balance, 'to_alipay_dict'):
params['balance'] = self.balance.to_alipay_dict()
else:
params['balance'] = self.balance
if self.benefit_info_list:
if isinstance(self.benefit_info_list, list):
for i in range(0, len(self.benefit_info_list)):
element = self.benefit_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.benefit_info_list[i] = element.to_alipay_dict()
if hasattr(self.benefit_info_list, 'to_alipay_dict'):
params['benefit_info_list'] = self.benefit_info_list.to_alipay_dict()
else:
params['benefit_info_list'] = self.benefit_info_list
if self.grade:
if hasattr(self.grade, 'to_alipay_dict'):
params['grade'] = self.grade.to_alipay_dict()
else:
params['grade'] = self.grade
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = UserInfoAndBenefitQueryResult()
if 'balance' in d:
o.balance = d['balance']
if 'benefit_info_list' in d:
o.benefit_info_list = d['benefit_info_list']
if 'grade' in d:
o.grade = d['grade']
return o
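# Hedged usage sketch (not part of the upstream SDK): round-trip a plain
# response dict through the model. Field values are made up for illustration;
# `benefit_info_list` is omitted because its element schema (BenefitGradePoint)
# is defined elsewhere.
if __name__ == '__main__':
    raw = {'balance': '12.50', 'grade': 'VIP1'}
    result = UserInfoAndBenefitQueryResult.from_alipay_dict(raw)
    assert result.to_alipay_dict() == raw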
|
302e86ac804a0c5500e8dfb7835d7d43c109a66f
|
baa2c6f22ff563d417e34692bf3345077eb8fa5f
|
/IPython/core/tests/test_ultratb.py
|
c4de95d564a569378809a22cb8556b2023a96d28
|
[
"BSD-3-Clause"
] |
permissive
|
ipython/ipython
|
c42ea223b6e391bb7dd39888cb959d4d5d6b21a1
|
e5103f971233fd66b558585cce7a4f52a716cd56
|
refs/heads/main
| 2023-08-30T18:27:18.436521
| 2023-08-29T12:16:00
| 2023-08-29T12:16:00
| 658,518
| 13,673
| 4,729
|
BSD-3-Clause
| 2023-09-12T20:22:09
| 2010-05-10T04:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 12,026
|
py
|
test_ultratb.py
|
# encoding: utf-8
"""Tests for IPython.core.ultratb
"""
import io
import os.path
import platform
import re
import sys
import traceback
import unittest
from textwrap import dedent
from tempfile import TemporaryDirectory
from IPython.core.ultratb import ColorTB, VerboseTB
from IPython.testing import tools as tt
from IPython.testing.decorators import onlyif_unicode_paths
from IPython.utils.syspathcontext import prepended_to_syspath
file_1 = """1
2
3
def f():
1/0
"""
file_2 = """def f():
1/0
"""
def recursionlimit(frames):
"""
decorator to set the recursion limit temporarily
"""
def inner(test_function):
def wrapper(*args, **kwargs):
rl = sys.getrecursionlimit()
sys.setrecursionlimit(frames)
try:
return test_function(*args, **kwargs)
finally:
sys.setrecursionlimit(rl)
return wrapper
return inner
class ChangedPyFileTest(unittest.TestCase):
def test_changing_py_file(self):
"""Traceback produced if the line where the error occurred is missing?
https://github.com/ipython/ipython/issues/1456
"""
with TemporaryDirectory() as td:
fname = os.path.join(td, "foo.py")
with open(fname, "w", encoding="utf-8") as f:
f.write(file_1)
with prepended_to_syspath(td):
ip.run_cell("import foo")
with tt.AssertPrints("ZeroDivisionError"):
ip.run_cell("foo.f()")
# Make the file shorter, so the line of the error is missing.
with open(fname, "w", encoding="utf-8") as f:
f.write(file_2)
# For some reason, this was failing on the *second* call after
# changing the file, so we call f() twice.
with tt.AssertNotPrints("Internal Python error", channel='stderr'):
with tt.AssertPrints("ZeroDivisionError"):
ip.run_cell("foo.f()")
with tt.AssertPrints("ZeroDivisionError"):
ip.run_cell("foo.f()")
iso_8859_5_file = u'''# coding: iso-8859-5
def fail():
"""дбИЖ"""
1/0 # дбИЖ
'''
class NonAsciiTest(unittest.TestCase):
@onlyif_unicode_paths
def test_nonascii_path(self):
# Non-ascii directory name as well.
with TemporaryDirectory(suffix=u'é') as td:
fname = os.path.join(td, u"fooé.py")
with open(fname, "w", encoding="utf-8") as f:
f.write(file_1)
with prepended_to_syspath(td):
ip.run_cell("import foo")
with tt.AssertPrints("ZeroDivisionError"):
ip.run_cell("foo.f()")
def test_iso8859_5(self):
with TemporaryDirectory() as td:
fname = os.path.join(td, 'dfghjkl.py')
with io.open(fname, 'w', encoding='iso-8859-5') as f:
f.write(iso_8859_5_file)
with prepended_to_syspath(td):
ip.run_cell("from dfghjkl import fail")
with tt.AssertPrints("ZeroDivisionError"):
with tt.AssertPrints(u'дбИЖ', suppress=False):
ip.run_cell('fail()')
def test_nonascii_msg(self):
cell = u"raise Exception('é')"
expected = u"Exception('é')"
ip.run_cell("%xmode plain")
with tt.AssertPrints(expected):
ip.run_cell(cell)
ip.run_cell("%xmode verbose")
with tt.AssertPrints(expected):
ip.run_cell(cell)
ip.run_cell("%xmode context")
with tt.AssertPrints(expected):
ip.run_cell(cell)
ip.run_cell("%xmode minimal")
with tt.AssertPrints(u"Exception: é"):
ip.run_cell(cell)
# Put this back into Context mode for later tests.
ip.run_cell("%xmode context")
class NestedGenExprTestCase(unittest.TestCase):
"""
Regression test for the following issues:
https://github.com/ipython/ipython/issues/8293
https://github.com/ipython/ipython/issues/8205
"""
def test_nested_genexpr(self):
code = dedent(
"""\
class SpecificException(Exception):
pass
def foo(x):
raise SpecificException("Success!")
sum(sum(foo(x) for _ in [0]) for x in [0])
"""
)
with tt.AssertPrints('SpecificException: Success!', suppress=False):
ip.run_cell(code)
indentationerror_file = """if True:
zoon()
"""
class IndentationErrorTest(unittest.TestCase):
def test_indentationerror_shows_line(self):
# See issue gh-2398
with tt.AssertPrints("IndentationError"):
with tt.AssertPrints("zoon()", suppress=False):
ip.run_cell(indentationerror_file)
with TemporaryDirectory() as td:
fname = os.path.join(td, "foo.py")
with open(fname, "w", encoding="utf-8") as f:
f.write(indentationerror_file)
with tt.AssertPrints("IndentationError"):
with tt.AssertPrints("zoon()", suppress=False):
ip.magic('run %s' % fname)
se_file_1 = """1
2
7/
"""
se_file_2 = """7/
"""
class SyntaxErrorTest(unittest.TestCase):
def test_syntaxerror_no_stacktrace_at_compile_time(self):
syntax_error_at_compile_time = """
def foo():
..
"""
with tt.AssertPrints("SyntaxError"):
ip.run_cell(syntax_error_at_compile_time)
with tt.AssertNotPrints("foo()"):
ip.run_cell(syntax_error_at_compile_time)
def test_syntaxerror_stacktrace_when_running_compiled_code(self):
syntax_error_at_runtime = """
def foo():
eval("..")
def bar():
foo()
bar()
"""
with tt.AssertPrints("SyntaxError"):
ip.run_cell(syntax_error_at_runtime)
# Assert syntax error during runtime generate stacktrace
with tt.AssertPrints(["foo()", "bar()"]):
ip.run_cell(syntax_error_at_runtime)
del ip.user_ns['bar']
del ip.user_ns['foo']
def test_changing_py_file(self):
with TemporaryDirectory() as td:
fname = os.path.join(td, "foo.py")
with open(fname, "w", encoding="utf-8") as f:
f.write(se_file_1)
with tt.AssertPrints(["7/", "SyntaxError"]):
ip.magic("run " + fname)
# Modify the file
with open(fname, "w", encoding="utf-8") as f:
f.write(se_file_2)
# The SyntaxError should point to the correct line
with tt.AssertPrints(["7/", "SyntaxError"]):
ip.magic("run " + fname)
def test_non_syntaxerror(self):
# SyntaxTB may be called with an error other than a SyntaxError
# See e.g. gh-4361
try:
raise ValueError('QWERTY')
except ValueError:
with tt.AssertPrints('QWERTY'):
ip.showsyntaxerror()
import sys
if platform.python_implementation() != "PyPy":
"""
New 3.9 Pgen Parser does not raise Memory error, except on failed malloc.
"""
class MemoryErrorTest(unittest.TestCase):
def test_memoryerror(self):
memoryerror_code = "(" * 200 + ")" * 200
ip.run_cell(memoryerror_code)
class Python3ChainedExceptionsTest(unittest.TestCase):
DIRECT_CAUSE_ERROR_CODE = """
try:
x = 1 + 2
print(not_defined_here)
except Exception as e:
x += 55
x - 1
y = {}
raise KeyError('uh') from e
"""
EXCEPTION_DURING_HANDLING_CODE = """
try:
x = 1 + 2
print(not_defined_here)
except Exception as e:
x += 55
x - 1
y = {}
raise KeyError('uh')
"""
SUPPRESS_CHAINING_CODE = """
try:
1/0
except Exception:
raise ValueError("Yikes") from None
"""
def test_direct_cause_error(self):
with tt.AssertPrints(["KeyError", "NameError", "direct cause"]):
ip.run_cell(self.DIRECT_CAUSE_ERROR_CODE)
def test_exception_during_handling_error(self):
with tt.AssertPrints(["KeyError", "NameError", "During handling"]):
ip.run_cell(self.EXCEPTION_DURING_HANDLING_CODE)
def test_suppress_exception_chaining(self):
with tt.AssertNotPrints("ZeroDivisionError"), \
tt.AssertPrints("ValueError", suppress=False):
ip.run_cell(self.SUPPRESS_CHAINING_CODE)
def test_plain_direct_cause_error(self):
with tt.AssertPrints(["KeyError", "NameError", "direct cause"]):
ip.run_cell("%xmode Plain")
ip.run_cell(self.DIRECT_CAUSE_ERROR_CODE)
ip.run_cell("%xmode Verbose")
def test_plain_exception_during_handling_error(self):
with tt.AssertPrints(["KeyError", "NameError", "During handling"]):
ip.run_cell("%xmode Plain")
ip.run_cell(self.EXCEPTION_DURING_HANDLING_CODE)
ip.run_cell("%xmode Verbose")
def test_plain_suppress_exception_chaining(self):
with tt.AssertNotPrints("ZeroDivisionError"), \
tt.AssertPrints("ValueError", suppress=False):
ip.run_cell("%xmode Plain")
ip.run_cell(self.SUPPRESS_CHAINING_CODE)
ip.run_cell("%xmode Verbose")
class RecursionTest(unittest.TestCase):
DEFINITIONS = """
def non_recurs():
1/0
def r1():
r1()
def r3a():
r3b()
def r3b():
r3c()
def r3c():
r3a()
def r3o1():
r3a()
def r3o2():
r3o1()
"""
def setUp(self):
ip.run_cell(self.DEFINITIONS)
def test_no_recursion(self):
with tt.AssertNotPrints("skipping similar frames"):
ip.run_cell("non_recurs()")
@recursionlimit(200)
def test_recursion_one_frame(self):
with tt.AssertPrints(re.compile(
r"\[\.\.\. skipping similar frames: r1 at line 5 \(\d{2,3} times\)\]")
):
ip.run_cell("r1()")
@recursionlimit(160)
def test_recursion_three_frames(self):
with tt.AssertPrints("[... skipping similar frames: "), \
tt.AssertPrints(re.compile(r"r3a at line 8 \(\d{2} times\)"), suppress=False), \
tt.AssertPrints(re.compile(r"r3b at line 11 \(\d{2} times\)"), suppress=False), \
tt.AssertPrints(re.compile(r"r3c at line 14 \(\d{2} times\)"), suppress=False):
ip.run_cell("r3o2()")
class PEP678NotesReportingTest(unittest.TestCase):
ERROR_WITH_NOTE = """
try:
raise AssertionError("Message")
except Exception as e:
try:
e.add_note("This is a PEP-678 note.")
except AttributeError: # Python <= 3.10
e.__notes__ = ("This is a PEP-678 note.",)
raise
"""
def test_verbose_reports_notes(self):
with tt.AssertPrints(["AssertionError", "Message", "This is a PEP-678 note."]):
ip.run_cell(self.ERROR_WITH_NOTE)
def test_plain_reports_notes(self):
with tt.AssertPrints(["AssertionError", "Message", "This is a PEP-678 note."]):
ip.run_cell("%xmode Plain")
ip.run_cell(self.ERROR_WITH_NOTE)
ip.run_cell("%xmode Verbose")
#----------------------------------------------------------------------------
# module testing (minimal)
def test_handlers():
def spam(c, d_e):
(d, e) = d_e
x = c + d
y = c * d
foo(x, y)
def foo(a, b, bar=1):
eggs(a, b + bar)
def eggs(f, g, z=globals()):
h = f + g
i = f - g
return h / i
buff = io.StringIO()
buff.write('')
buff.write('*** Before ***')
try:
buff.write(spam(1, (2, 3)))
except:
traceback.print_exc(file=buff)
handler = ColorTB(ostream=buff)
buff.write('*** ColorTB ***')
try:
buff.write(spam(1, (2, 3)))
except:
handler(*sys.exc_info())
buff.write('')
handler = VerboseTB(ostream=buff)
buff.write('*** VerboseTB ***')
try:
buff.write(spam(1, (2, 3)))
except:
handler(*sys.exc_info())
buff.write('')
|
f25944274674f3337906d1b9be57b3527c35ad5b
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/mmagic/models/editors/deblurganv2/deblurganv2_generator.py
|
d0f98aaa9a207b702e07aa46c4d4a61b2bc29cc3
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 19,475
|
py
|
deblurganv2_generator.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmagic.registry import MODELS
from .deblurganv2_util import MobileNetV2, get_norm_layer, inceptionresnetv2
backbone_list = ['FPNInception', 'FPNMobileNet', 'FPNInceptionSimple']
class FPNHead(nn.Module):
"""Head for FPNInception,FPNInceptionSimple and FPNMobilenet."""
def __init__(self, num_in, num_mid, num_out):
super().__init__()
self.block0 = nn.Conv2d(
num_in, num_mid, kernel_size=3, padding=1, bias=False)
self.block1 = nn.Conv2d(
num_mid, num_out, kernel_size=3, padding=1, bias=False)
def forward(self, x):
"""Forward function.
        Args:
            x (torch.Tensor): Input tensor.
        Returns:
            torch.Tensor: Output tensor.
"""
x = nn.functional.relu(self.block0(x), inplace=True)
x = nn.functional.relu(self.block1(x), inplace=True)
return x
class FPN_inception(nn.Module):
def __init__(self, norm_layer, num_filter=256, pretrained='imagenet'):
"""Creates an `FPN` instance for feature extraction.
Args:
num_filter: the number of filters in each output pyramid level
pretrained: use ImageNet pre-trained backbone feature extractor
"""
super().__init__()
self.inception = inceptionresnetv2(
num_classes=1000, pretrained=pretrained)
self.enc0 = self.inception.conv2d_1a
self.enc1 = nn.Sequential(
self.inception.conv2d_2a,
self.inception.conv2d_2b,
self.inception.maxpool_3a,
) # 64
self.enc2 = nn.Sequential(
self.inception.conv2d_3b,
self.inception.conv2d_4a,
self.inception.maxpool_5a,
) # 192
self.enc3 = nn.Sequential(
self.inception.mixed_5b,
self.inception.repeat,
self.inception.mixed_6a,
) # 1088
self.enc4 = nn.Sequential(
self.inception.repeat_1,
self.inception.mixed_7a,
) # 2080
self.td1 = nn.Sequential(
nn.Conv2d(num_filter, num_filter, kernel_size=3, padding=1),
norm_layer(num_filter), nn.ReLU(inplace=True))
self.td2 = nn.Sequential(
nn.Conv2d(num_filter, num_filter, kernel_size=3, padding=1),
norm_layer(num_filter), nn.ReLU(inplace=True))
self.td3 = nn.Sequential(
nn.Conv2d(num_filter, num_filter, kernel_size=3, padding=1),
norm_layer(num_filter), nn.ReLU(inplace=True))
self.pad = nn.ReflectionPad2d(1)
self.lateral4 = nn.Conv2d(2080, num_filter, kernel_size=1, bias=False)
self.lateral3 = nn.Conv2d(1088, num_filter, kernel_size=1, bias=False)
self.lateral2 = nn.Conv2d(192, num_filter, kernel_size=1, bias=False)
self.lateral1 = nn.Conv2d(64, num_filter, kernel_size=1, bias=False)
self.lateral0 = nn.Conv2d(
32, num_filter // 2, kernel_size=1, bias=False)
for param in self.inception.parameters():
param.requires_grad = False
def unfreeze(self):
for param in self.inception.parameters():
param.requires_grad = True
def forward(self, x):
"""Forward function.
        Args:
            x (torch.Tensor): Input tensor.
        Returns:
            torch.Tensor: Output tensor.
"""
        # Bottom-up pathway through the InceptionResNetV2 backbone
enc0 = self.enc0(x)
enc1 = self.enc1(enc0) # 256
enc2 = self.enc2(enc1) # 512
enc3 = self.enc3(enc2) # 1024
enc4 = self.enc4(enc3) # 2048
# Lateral connections
lateral4 = self.pad(self.lateral4(enc4))
lateral3 = self.pad(self.lateral3(enc3))
lateral2 = self.lateral2(enc2)
lateral1 = self.pad(self.lateral1(enc1))
lateral0 = self.lateral0(enc0)
# Top-down pathway
        pad = (1, 2, 1, 2)  # reflect padding (left, right, top, bottom) for the last two dims
pad1 = (0, 1, 0, 1)
map4 = lateral4
map3 = self.td1(
lateral3 +
nn.functional.upsample(map4, scale_factor=2, mode='nearest'))
map2 = self.td2(
F.pad(lateral2, pad, 'reflect') +
nn.functional.upsample(map3, scale_factor=2, mode='nearest'))
map1 = self.td3(
lateral1 +
nn.functional.upsample(map2, scale_factor=2, mode='nearest'))
return F.pad(lateral0, pad1, 'reflect'), map1, map2, map3, map4
class FPNInception(nn.Module):
"""Feature Pyramid Network (FPN) with four feature maps of resolutions 1/4,
1/8, 1/16, 1/32 and `num_filter` filters for all feature maps."""
def __init__(self,
norm_layer,
output_ch=3,
num_filter=128,
num_filter_fpn=256):
super().__init__()
norm_layer = get_norm_layer(norm_type=norm_layer)
self.fpn = FPN_inception(
num_filter=num_filter_fpn, norm_layer=norm_layer)
# The segmentation heads on top of the FPN
self.head1 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head2 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head3 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head4 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.smooth = nn.Sequential(
nn.Conv2d(4 * num_filter, num_filter, kernel_size=3, padding=1),
norm_layer(num_filter),
nn.ReLU(),
)
self.smooth2 = nn.Sequential(
nn.Conv2d(num_filter, num_filter // 2, kernel_size=3, padding=1),
norm_layer(num_filter // 2),
nn.ReLU(),
)
self.final = nn.Conv2d(
num_filter // 2, output_ch, kernel_size=3, padding=1)
def unfreeze(self):
self.fpn.unfreeze()
def forward(self, x):
"""Forward function.
        Args:
            x (torch.Tensor): Input tensor.
        Returns:
            torch.Tensor: Output tensor.
"""
map0, map1, map2, map3, map4 = self.fpn(x)
map4 = nn.functional.upsample(
self.head4(map4), scale_factor=8, mode='nearest')
map3 = nn.functional.upsample(
self.head3(map3), scale_factor=4, mode='nearest')
map2 = nn.functional.upsample(
self.head2(map2), scale_factor=2, mode='nearest')
map1 = nn.functional.upsample(
self.head1(map1), scale_factor=1, mode='nearest')
smoothed = self.smooth(torch.cat([map4, map3, map2, map1], dim=1))
smoothed = nn.functional.upsample(
smoothed, scale_factor=2, mode='nearest')
smoothed = self.smooth2(smoothed + map0)
smoothed = nn.functional.upsample(
smoothed, scale_factor=2, mode='nearest')
final = self.final(smoothed)
res = torch.tanh(final) + x
return torch.clamp(res, min=-1, max=1)
class FPN_inceptionsimple(nn.Module):
def __init__(self, norm_layer, num_filters=256):
"""Creates an `FPN` instance for feature extraction.
Args:
num_filters: the number of filters in each output pyramid level
        Note:
            The InceptionResNetV2 backbone is always loaded with ImageNet
            pre-trained weights; there is no ``pretrained`` argument here.
"""
super().__init__()
self.inception = inceptionresnetv2(
num_classes=1000, pretrained='imagenet')
self.enc0 = self.inception.conv2d_1a
self.enc1 = nn.Sequential(
self.inception.conv2d_2a,
self.inception.conv2d_2b,
self.inception.maxpool_3a,
) # 64
self.enc2 = nn.Sequential(
self.inception.conv2d_3b,
self.inception.conv2d_4a,
self.inception.maxpool_5a,
) # 192
self.enc3 = nn.Sequential(
self.inception.mixed_5b,
self.inception.repeat,
self.inception.mixed_6a,
) # 1088
self.enc4 = nn.Sequential(
self.inception.repeat_1,
self.inception.mixed_7a,
) # 2080
self.pad = nn.ReflectionPad2d(1)
self.lateral4 = nn.Conv2d(2080, num_filters, kernel_size=1, bias=False)
self.lateral3 = nn.Conv2d(1088, num_filters, kernel_size=1, bias=False)
self.lateral2 = nn.Conv2d(192, num_filters, kernel_size=1, bias=False)
self.lateral1 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False)
self.lateral0 = nn.Conv2d(
32, num_filters // 2, kernel_size=1, bias=False)
for param in self.inception.parameters():
param.requires_grad = False
def unfreeze(self):
for param in self.inception.parameters():
param.requires_grad = True
def forward(self, x):
"""Forward function.
        Args:
            x (torch.Tensor): Input tensor.
        Returns:
            torch.Tensor: Output tensor.
"""
        # Bottom-up pathway through the InceptionResNetV2 backbone
enc0 = self.enc0(x)
enc1 = self.enc1(enc0) # 256
enc2 = self.enc2(enc1) # 512
enc3 = self.enc3(enc2) # 1024
enc4 = self.enc4(enc3) # 2048
# Lateral connections
lateral4 = self.pad(self.lateral4(enc4))
lateral3 = self.pad(self.lateral3(enc3))
lateral2 = self.lateral2(enc2)
lateral1 = self.pad(self.lateral1(enc1))
lateral0 = self.lateral0(enc0)
# Top-down pathway
        pad = (1, 2, 1, 2)  # reflect padding (left, right, top, bottom) for the last two dims
pad1 = (0, 1, 0, 1)
map4 = lateral4
map3 = lateral3 + nn.functional.upsample(
map4, scale_factor=2, mode='nearest')
map2 = F.pad(lateral2, pad, 'reflect') + nn.functional.upsample(
map3, scale_factor=2, mode='nearest')
map1 = lateral1 + nn.functional.upsample(
map2, scale_factor=2, mode='nearest')
return F.pad(lateral0, pad1, 'reflect'), map1, map2, map3, map4
class FPNInceptionSimple(nn.Module):
"""Feature Pyramid Network (FPN) with four feature maps of resolutions 1/4,
1/8, 1/16, 1/32 and `num_filter` filters for all feature maps."""
def __init__(self,
norm_layer,
output_ch=3,
num_filter=128,
num_filter_fpn=256):
super().__init__()
norm_layer = get_norm_layer(norm_type=norm_layer)
self.fpn = FPN_inceptionsimple(
            num_filters=num_filter_fpn, norm_layer=norm_layer)
# The segmentation heads on top of the FPN
self.head1 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head2 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head3 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head4 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.smooth = nn.Sequential(
nn.Conv2d(4 * num_filter, num_filter, kernel_size=3, padding=1),
norm_layer(num_filter),
nn.ReLU(),
)
self.smooth2 = nn.Sequential(
nn.Conv2d(num_filter, num_filter // 2, kernel_size=3, padding=1),
norm_layer(num_filter // 2),
nn.ReLU(),
)
self.final = nn.Conv2d(
num_filter // 2, output_ch, kernel_size=3, padding=1)
def unfreeze(self):
self.fpn.unfreeze()
def forward(self, x):
"""Forward function.
        Args:
            x (torch.Tensor): Input tensor.
        Returns:
            torch.Tensor: Output tensor.
"""
map0, map1, map2, map3, map4 = self.fpn(x)
map4 = nn.functional.upsample(
self.head4(map4), scale_factor=8, mode='nearest')
map3 = nn.functional.upsample(
self.head3(map3), scale_factor=4, mode='nearest')
map2 = nn.functional.upsample(
self.head2(map2), scale_factor=2, mode='nearest')
map1 = nn.functional.upsample(
self.head1(map1), scale_factor=1, mode='nearest')
smoothed = self.smooth(torch.cat([map4, map3, map2, map1], dim=1))
smoothed = nn.functional.upsample(
smoothed, scale_factor=2, mode='nearest')
smoothed = self.smooth2(smoothed + map0)
smoothed = nn.functional.upsample(
smoothed, scale_factor=2, mode='nearest')
final = self.final(smoothed)
res = torch.tanh(final) + x
return torch.clamp(res, min=-1, max=1)
class FPN_mobilenet(nn.Module):
def __init__(self, norm_layer, num_filters=128, pretrained=None):
"""Creates an `FPN` instance for feature extraction.
Args:
num_filters: the number of filters in each output pyramid level
pretrained: use ImageNet pre-trained backbone feature extractor
"""
super().__init__()
net = MobileNetV2(n_class=1000)
if pretrained:
            # Load pretrained MobileNetV2 weights from the given checkpoint path
if torch.cuda.is_available():
state_dict = torch.load(
pretrained) # add map_location='cpu' if no gpu
else:
state_dict = torch.load(pretrained, map_location='cpu')
net.load_state_dict(state_dict)
self.features = net.features
self.enc0 = nn.Sequential(*self.features[0:2])
self.enc1 = nn.Sequential(*self.features[2:4])
self.enc2 = nn.Sequential(*self.features[4:7])
self.enc3 = nn.Sequential(*self.features[7:11])
self.enc4 = nn.Sequential(*self.features[11:16])
self.td1 = nn.Sequential(
nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
norm_layer(num_filters), nn.ReLU(inplace=True))
self.td2 = nn.Sequential(
nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
norm_layer(num_filters), nn.ReLU(inplace=True))
self.td3 = nn.Sequential(
nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
norm_layer(num_filters), nn.ReLU(inplace=True))
self.lateral4 = nn.Conv2d(160, num_filters, kernel_size=1, bias=False)
self.lateral3 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False)
self.lateral2 = nn.Conv2d(32, num_filters, kernel_size=1, bias=False)
self.lateral1 = nn.Conv2d(24, num_filters, kernel_size=1, bias=False)
self.lateral0 = nn.Conv2d(
16, num_filters // 2, kernel_size=1, bias=False)
for param in self.features.parameters():
param.requires_grad = False
def unfreeze(self):
for param in self.features.parameters():
param.requires_grad = True
def forward(self, x):
"""Forward function.
        Args:
            x (torch.Tensor): Input tensor.
        Returns:
            torch.Tensor: Output tensor.
"""
        # Bottom-up pathway through the MobileNetV2 backbone
enc0 = self.enc0(x)
enc1 = self.enc1(enc0) # 256
enc2 = self.enc2(enc1) # 512
enc3 = self.enc3(enc2) # 1024
enc4 = self.enc4(enc3) # 2048
# Lateral connections
lateral4 = self.lateral4(enc4)
lateral3 = self.lateral3(enc3)
lateral2 = self.lateral2(enc2)
lateral1 = self.lateral1(enc1)
lateral0 = self.lateral0(enc0)
# Top-down pathway
map4 = lateral4
map3 = self.td1(
lateral3 +
nn.functional.upsample(map4, scale_factor=2, mode='nearest'))
map2 = self.td2(
lateral2 +
nn.functional.upsample(map3, scale_factor=2, mode='nearest'))
map1 = self.td3(
lateral1 +
nn.functional.upsample(map2, scale_factor=2, mode='nearest'))
return lateral0, map1, map2, map3, map4
class FPNMobileNet(nn.Module):
def __init__(self,
norm_layer,
output_ch=3,
num_filter=64,
num_filter_fpn=128,
pretrained=None):
super().__init__()
# Feature Pyramid Network (FPN) with four feature maps of resolutions
# 1/4, 1/8, 1/16, 1/32 and `num_filters` filters for all feature maps.
norm_layer = get_norm_layer(norm_type=norm_layer)
self.fpn = FPN_mobilenet(
num_filters=num_filter_fpn,
norm_layer=norm_layer,
pretrained=pretrained)
# The segmentation heads on top of the FPN
self.head1 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head2 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head3 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.head4 = FPNHead(num_filter_fpn, num_filter, num_filter)
self.smooth = nn.Sequential(
nn.Conv2d(4 * num_filter, num_filter, kernel_size=3, padding=1),
norm_layer(num_filter),
nn.ReLU(),
)
self.smooth2 = nn.Sequential(
nn.Conv2d(num_filter, num_filter // 2, kernel_size=3, padding=1),
norm_layer(num_filter // 2),
nn.ReLU(),
)
self.final = nn.Conv2d(
num_filter // 2, output_ch, kernel_size=3, padding=1)
def unfreeze(self):
"""unfreeze the fpn network."""
self.fpn.unfreeze()
def forward(self, x):
"""Forward function.
        Args:
            x (torch.Tensor): Input tensor.
        Returns:
            torch.Tensor: Output tensor.
"""
map0, map1, map2, map3, map4 = self.fpn(x)
map4 = nn.functional.upsample(
self.head4(map4), scale_factor=8, mode='nearest')
map3 = nn.functional.upsample(
self.head3(map3), scale_factor=4, mode='nearest')
map2 = nn.functional.upsample(
self.head2(map2), scale_factor=2, mode='nearest')
map1 = nn.functional.upsample(
self.head1(map1), scale_factor=1, mode='nearest')
smoothed = self.smooth(torch.cat([map4, map3, map2, map1], dim=1))
smoothed = nn.functional.upsample(
smoothed, scale_factor=2, mode='nearest')
smoothed = self.smooth2(smoothed + map0)
smoothed = nn.functional.upsample(
smoothed, scale_factor=2, mode='nearest')
final = self.final(smoothed)
res = torch.tanh(final) + x
return torch.clamp(res, min=-1, max=1)
@MODELS.register_module()
class DeblurGanV2Generator:
"""Defines the generator for DeblurGanv2 with the specified arguments..
Args:
model (Str): Type of the generator model
"""
def __new__(cls, backbone, *args, **kwargs):
if backbone == 'FPNInception':
return FPNInception(*args, **kwargs)
elif backbone == 'FPNMobileNet':
return FPNMobileNet(*args, **kwargs)
elif backbone == 'FPNInceptionSimple':
return FPNInceptionSimple(*args, **kwargs)
else:
raise Exception('Generator model {} not found, '
'Please use the following models: '
'{}'.format(backbone, backbone_list))
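# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream file). __new__ dispatches on
# the `backbone` string and forwards the remaining arguments unchanged to the
# selected FPN class. The norm-layer name below ('instance') is an assumption
# about what get_norm_layer accepts; FPNInception also downloads ImageNet
# weights for InceptionResNetV2 on first use.
#
#     generator = DeblurGanV2Generator(
#         backbone='FPNInception', norm_layer='instance', output_ch=3)
#     restored = generator(blurred)   # blurred: NCHW tensor scaled to [-1, 1]
# ---------------------------------------------------------------------------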
|
28a171d9353d1b9a3d1d28508b774ae8e59056a9
|
b684bd6c260a0ad43392d5dec4bd919fed686ed9
|
/tests/settings.py
|
480a744ecb4174d92e2508f8662f21f0de97fd81
|
[
"MIT"
] |
permissive
|
codingjoe/django-stdimage
|
e90a5d9c21653372cdfa8e366c2ea3389b425f2e
|
7100350161c540dcd949ab2cb1ea47b1e9de0208
|
refs/heads/master
| 2023-08-25T08:45:47.670247
| 2023-07-31T21:33:00
| 2023-08-03T07:19:02
| 15,314,657
| 285
| 80
|
MIT
| 2023-09-12T11:25:14
| 2013-12-19T15:08:05
|
Python
|
UTF-8
|
Python
| false
| false
| 983
|
py
|
settings.py
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import os
import tempfile
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"stdimage",
"tests",
)
DEFAULT_FILE_STORAGE = "tests.storage.MyFileSystemStorage"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
}
]
MIDDLEWARE = MIDDLEWARE_CLASSES = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
)
MEDIA_ROOT = tempfile.mkdtemp()
SITE_ID = 1
ROOT_URLCONF = "tests.urls"
SECRET_KEY = "foobar"
USE_TZ = True
|
09a088a34576eabf2cf99dd7a807804a296f0599
|
813f67f6815d0389589b719625c46b2265ca0f87
|
/starlette_exporter/labels.py
|
5fcd8807b85109b9adffa74b7c5e0f2a8c6c3553
|
[
"Apache-2.0"
] |
permissive
|
stephenhillier/starlette_exporter
|
c0916acb4c592617c31acbb3616a2f48c41aeb45
|
0de22e78233cf88e746b68db76945965d0bfbedf
|
refs/heads/master
| 2023-08-16T17:29:50.915043
| 2023-08-06T13:50:36
| 2023-08-06T13:50:36
| 217,774,698
| 266
| 30
|
Apache-2.0
| 2023-08-06T13:50:37
| 2019-10-26T22:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
labels.py
|
"""utilities for working with labels"""
from typing import Callable, Iterable, Optional
from starlette.requests import Request
def from_header(key: str, allowed_values: Optional[Iterable] = None) -> Callable:
"""returns a function that retrieves a header value from a request.
The returned function can be passed to the `labels` argument of PrometheusMiddleware
to label metrics using a header value.
`key`: header key
`allowed_values`: an iterable (e.g. list or tuple) containing an allowlist of values. Any
header value not in allowed_values will result in an empty string being returned. Use
this to constrain the potential label values.
example:
```
PrometheusMiddleware(
labels={
"host": from_header("X-User", allowed_values=("frank", "estelle"))
}
)
```
"""
def inner(r: Request):
v = r.headers.get(key, "")
# if allowed_values was supplied, return a blank string if
        # the value of the header does not match any of the allowed values.
if allowed_values is not None and v not in allowed_values:
return ""
return v
return inner
|
3e313b8a04b12610995251a43c3d124fab5cd665
|
0c932b52ddb40867419dedd394934c059fc4ef7e
|
/scripts/dcommand.py
|
8182f547547f4030624748ed78b0341b3ed21d44
|
[
"MIT"
] |
permissive
|
allenai/allenact
|
ed614f745cbd8ddb1404af6e8d5e855b5d66908a
|
9772eeeb7eacc1f9a83c90d1cf549a3f7e783c12
|
refs/heads/main
| 2023-09-05T01:33:37.424674
| 2023-06-13T18:13:57
| 2023-06-13T18:13:57
| 233,944,205
| 266
| 58
|
NOASSERTION
| 2023-07-25T17:05:01
| 2020-01-14T21:58:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
dcommand.py
|
#!/usr/bin/env python3
"""Tool to run command on multiple nodes through SSH."""
import argparse
import glob
import os
def get_argument_parser():
"""Creates the argument parser."""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="dcommand", formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--runs_on",
required=False,
type=str,
default=None,
help="Comma-separated IP addresses of machines. If empty, the tool will scan for lists of IP addresses"
" in `screen_ids_file`s in the `~/.allenact` directory.",
)
parser.add_argument(
"--ssh_cmd",
required=False,
type=str,
default="ssh {addr}",
help="SSH command. Useful to utilize a pre-shared key with 'ssh -i path/to/mykey.pem ubuntu@{addr}'.",
)
parser.add_argument(
"--command",
required=False,
default="nvidia-smi | head -n 35",
type=str,
help="Command to be run through ssh onto each machine",
)
return parser
def get_args():
"""Creates the argument parser and parses any input arguments."""
parser = get_argument_parser()
args = parser.parse_args()
return args
def wrap_double(text):
return f'"{text}"'
def wrap_single(text):
return f"'{text}'"
def wrap_single_nested(text, quote=r"'\''"):
return f"{quote}{text}{quote}"
if __name__ == "__main__":
args = get_args()
all_addresses = []
if args.runs_on is not None:
all_addresses = args.runs_on.split(",")
else:
all_files = sorted(
glob.glob(os.path.join(os.path.expanduser("~"), ".allenact", "*.killfile")),
reverse=True,
)
if len(all_files) == 0:
print(
f"No screen_ids_file found under {os.path.join(os.path.expanduser('~'), '.allenact')}"
)
for killfile in all_files:
with open(killfile, "r") as f:
# Each line contains 'IP_address screen_ID'
nodes = [tuple(line[:-1].split(" ")) for line in f.readlines()]
all_addresses.extend(node[0] for node in nodes)
use_addresses = ""
while use_addresses not in ["y", "n"]:
use_addresses = input(
f"Run on {all_addresses} from {killfile}? [Y/n] "
).lower()
if use_addresses == "":
use_addresses = "y"
if use_addresses == "n":
all_addresses.clear()
else:
break
print(f"Running on IP addresses {all_addresses}")
for it, addr in enumerate(all_addresses):
ssh_command = f"{args.ssh_cmd.format(addr=addr)} {wrap_single(args.command)}"
print(f"{it} {addr} SSH command {ssh_command}")
os.system(ssh_command)
print("DONE")
|
734202481a4ecdf99acaa0ce19031b4a320b7681
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/third_party/webtest/webtest/utils.py
|
e6b1cf8ad209ceb856f25cbb5617d4f99cc9d6b2
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,214
|
py
|
utils.py
|
# -*- coding: utf-8 -*-
import re
import six
from json import dumps
from webtest.compat import urlencode
class NoDefault(object):
"""Sentinel to uniquely represent no default value."""
def __repr__(self):
return '<NoDefault>'
NoDefault = NoDefault()
def json_method(method):
"""Do a %(method)s request. Very like the
:class:`~webtest.TestApp.%(lmethod)s` method.
``params`` are dumped to json and put in the body of the request.
Content-Type is set to ``application/json``.
Returns a :class:`webtest.TestResponse` object.
"""
def wrapper(self, url, params=NoDefault, **kw):
content_type = 'application/json'
if params is not NoDefault:
params = dumps(params, cls=self.JSONEncoder)
kw.update(
params=params,
content_type=content_type,
upload_files=None,
)
return self._gen_request(method, url, **kw)
subst = dict(lmethod=method.lower(), method=method)
wrapper.__doc__ = json_method.__doc__ % subst
wrapper.__name__ = str('%(lmethod)s_json' % subst)
return wrapper
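# Hedged illustration (not part of the original module): this factory is how
# the *_json helpers on TestApp are expected to be generated, e.g.
#
#     post_json = json_method('POST')
#     put_json = json_method('PUT')
#
# so that app.post_json('/items', {'name': 'x'}) serialises the params to JSON
# and sets Content-Type to application/json.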
def stringify(value):
if isinstance(value, six.text_type):
return value
elif isinstance(value, six.binary_type):
return value.decode('utf8')
else:
return str(value)
entity_pattern = re.compile(r"&(\w+|#\d+|#[xX][a-fA-F0-9]+);")
def encode_params(params, content_type):
if params is NoDefault:
return ''
if isinstance(params, dict) or hasattr(params, 'items'):
params = list(params.items())
if isinstance(params, (list, tuple)):
if content_type:
content_type = content_type.lower()
if 'charset=' in content_type:
charset = content_type.split('charset=')[1]
charset = charset.strip('; ').lower()
encoded_params = []
for k, v in params:
if isinstance(v, six.text_type):
v = v.encode(charset)
encoded_params.append((k, v))
params = encoded_params
params = urlencode(params, doseq=True)
return params
def make_pattern(pat):
"""Find element pattern can be a regex or a callable."""
if pat is None:
return None
if isinstance(pat, six.binary_type):
pat = pat.decode('utf8')
if isinstance(pat, six.text_type):
pat = re.compile(pat)
if hasattr(pat, 'search'):
return pat.search
if hasattr(pat, '__call__'):
return pat
raise ValueError(
"Cannot make callable pattern object out of %r" % pat)
class _RequestCookieAdapter(object):
"""
cookielib.CookieJar support for webob.Request
"""
def __init__(self, request):
self._request = request
self.origin_req_host = request.host
def is_unverifiable(self):
return True # sure? Why not?
@property
def unverifiable(self): # NOQA
        # This is an undocumented method that Python 3's cookielib uses
return True
def get_full_url(self):
return self._request.url
def get_host(self):
return self.origin_req_host
get_origin_req_host = get_host
def add_unredirected_header(self, key, header):
self._request.headers[key] = header
def has_header(self, key):
return key in self._request.headers
def get_type(self):
return self._request.scheme
@property
def type(self): # NOQA
        # This is an undocumented method that Python 3's cookielib uses
return self.get_type()
def header_items(self): # pragma: no cover
# This is unused on most python versions
return self._request.headers.items()
class _ResponseCookieAdapter(object):
"""
cookielib.CookieJar support for webob.Response
"""
def __init__(self, response):
self._response = response
def info(self):
return self
def getheaders(self, header):
return self._response.headers.getall(header)
def get_all(self, headers, default): # NOQA
        # This is an undocumented method that Python 3's cookielib uses
return self._response.headers.getall(headers)
|
33d186848df999925085d997081f4b01d643756d
|
e22eeb5256e17a96a98b3ff25433aec2d641cd2c
|
/openstack/tests/unit/accelerator/v2/test_device.py
|
c354d522253ed753dea1277a9bf37c7b8dbef2a7
|
[
"Apache-2.0"
] |
permissive
|
openstack/openstacksdk
|
b4b95fd7869653feea5a3b783e9a5c588235c039
|
d474eb84c605c429bb9cccb166cabbdd1654d73c
|
refs/heads/master
| 2023-09-03T22:50:03.398512
| 2023-07-27T14:09:35
| 2023-08-29T16:28:46
| 16,223,378
| 124
| 130
|
Apache-2.0
| 2023-09-06T02:52:47
| 2014-01-25T02:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,030
|
py
|
test_device.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.accelerator.v2 import device
from openstack.tests.unit import base
EXAMPLE = {
'id': '1',
'uuid': uuid.uuid4(),
'created_at': '2019-08-09T12:14:57.233772',
'updated_at': '2019-08-09T12:15:57.233772',
'type': 'test_type',
'vendor': '0x8086',
'model': 'test_model',
'std_board_info': '{"product_id": "0x09c4"}',
'vendor_board_info': 'test_vb_info',
}
class TestDevice(base.TestCase):
def test_basic(self):
sot = device.Device()
self.assertEqual('device', sot.resource_key)
self.assertEqual('devices', sot.resources_key)
self.assertEqual('/devices', sot.base_path)
self.assertFalse(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertFalse(sot.allow_commit)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = device.Device(**EXAMPLE)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['uuid'], sot.uuid)
self.assertEqual(EXAMPLE['type'], sot.type)
self.assertEqual(EXAMPLE['vendor'], sot.vendor)
self.assertEqual(EXAMPLE['model'], sot.model)
self.assertEqual(EXAMPLE['std_board_info'], sot.std_board_info)
self.assertEqual(EXAMPLE['vendor_board_info'], sot.vendor_board_info)
self.assertEqual(EXAMPLE['created_at'], sot.created_at)
self.assertEqual(EXAMPLE['updated_at'], sot.updated_at)
|
9dd82380d808a4afb48af1116ba1490823d4d92f
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/apps/jobbrowser/src/jobbrowser/api2.py
|
b51fe0f7dc1e159df56d9d94fe8cf172b0203c5f
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,172
|
py
|
api2.py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import sys
from urllib.request import Request, urlopen
from django.http import HttpResponse
from desktop.lib.i18n import smart_unicode
from desktop.lib.django_util import JsonResponse
from desktop.views import serve_403_error
from jobbrowser.apis.base_api import get_api
from jobbrowser.apis.query_store import query_store_proxy, stream_download_bundle
from jobbrowser.conf import DISABLE_KILLING_JOBS, USE_PROXY
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger()
def api_error_handler(func):
def decorator(*args, **kwargs):
response = {}
try:
return func(*args, **kwargs)
except Exception as e:
LOG.exception('Error running %s' % func)
response['status'] = -1
response['message'] = smart_unicode(e)
finally:
if response:
return JsonResponse(response)
return decorator
@api_error_handler
def jobs(request, interface=None):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = interface or json.loads(request.POST.get('interface'))
filters = dict([(key, value) for _filter in json.loads(
request.POST.get('filters', '[]')) for key, value in list(_filter.items()) if value
])
if interface == 'queries-hive':
filters = json.loads(request.body)
jobs = get_api(request.user, interface, cluster=cluster).apps(filters)
response['apps'] = jobs['apps']
response['total'] = jobs.get('total')
response['status'] = 0
if interface == 'queries-hive':
return JsonResponse(response['apps'])
else:
return JsonResponse(response)
@api_error_handler
def job(request, interface=None):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = interface or json.loads(request.POST.get('interface'))
if interface == 'queries-hive':
app_id = json.loads(request.body)['queryId']
else:
app_id = json.loads(request.POST.get('app_id'))
if interface == 'schedules':
filters = dict([(key, value) for _filter in json.loads(
request.POST.get('filters', '[]')) for key, value in list(_filter.items()) if value
])
offset = json.loads(request.POST.get('pagination', '{"offset": 1}')).get('offset')
response_app = get_api(request.user, interface, cluster=cluster).app(app_id, offset=offset, filters=filters)
else:
response_app = get_api(request.user, interface, cluster=cluster).app(app_id)
if response_app.get('status') == -1 and response_app.get('message'):
response.update(response_app)
else:
response['app'] = response_app
response['status'] = 0
if interface == 'queries-hive':
return JsonResponse(response['app'])
else:
return JsonResponse(response)
@api_error_handler
def action(request, interface=None, action=None):
response = {'status': -1, 'message': ''}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = json.loads(request.POST.get('interface'))
app_ids = json.loads(request.POST.get('app_ids'))
operation = json.loads(request.POST.get('operation'))
if operation.get('action') == 'kill' and DISABLE_KILLING_JOBS.get():
return serve_403_error(request)
response['operation'] = operation
response.update(
get_api(request.user, interface, cluster=cluster).action(app_ids, operation)
)
return JsonResponse(response)
@api_error_handler
def logs(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = json.loads(request.POST.get('interface'))
app_id = json.loads(request.POST.get('app_id'))
app_type = json.loads(request.POST.get('type'))
log_name = json.loads(request.POST.get('name'))
response['logs'] = get_api(request.user, interface, cluster=cluster).logs(
app_id, app_type, log_name, json.loads(request.GET.get('is_embeddable', 'false').lower())
)
response['status'] = 0
return JsonResponse(response)
@api_error_handler
def profile(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = json.loads(request.POST.get('interface'))
app_id = json.loads(request.POST.get('app_id'))
app_type = json.loads(request.POST.get('app_type'))
app_property = json.loads(request.POST.get('app_property'))
app_filters = dict([
(key, value) for _filter in json.loads(request.POST.get('app_filters', '[]'))
for key, value in list(_filter.items()) if value
])
api = get_api(request.user, interface, cluster=cluster)
api._set_request(request) # For YARN
resp = api.profile(app_id, app_type, app_property, app_filters)
if isinstance(resp, HttpResponse):
return resp
else:
response[app_property] = resp
response['status'] = 0
return JsonResponse(response)
@api_error_handler
def query_store_api(request, path=None):
response = {'status': -1}
if USE_PROXY.get():
response = query_store_proxy(request, path)
else:
if path == 'api/query/search':
filters = json.loads(request.body)
resp = get_api(request.user, interface='queries-hive').apps(filters['search'])
response = resp['apps']
return JsonResponse(response)
@api_error_handler
def query_store_download_bundle(request, id=None):
return stream_download_bundle(request, id)
|
cea5c23229df01446da78f6892265a0ad343d46c
|
439f48720042970a7f9b156fbe9442b3ca66cbd9
|
/omrdatasettools/CapitanImageGenerator.py
|
b14847e8c02dc501394e4f127b91b64baa4fd0ef
|
[
"MIT",
"CC-BY-SA-3.0",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"CC-BY-NC-SA-4.0",
"GPL-1.0-or-later",
"AGPL-3.0-only",
"CC-BY-SA-4.0"
] |
permissive
|
apacha/OMR-Datasets
|
93e73a020044f3432c2239070acd1eb5ae914229
|
e950865749e3f7ec20a54db1594f76842f65cc40
|
refs/heads/main
| 2022-12-02T20:35:58.915823
| 2022-11-27T21:58:25
| 2022-11-27T21:58:25
| 97,099,904
| 261
| 47
|
MIT
| 2022-11-27T12:39:50
| 2017-07-13T08:38:39
|
Python
|
UTF-8
|
Python
| false
| false
| 11,627
|
py
|
CapitanImageGenerator.py
|
import argparse
import os
from typing import List
import numpy
from PIL import Image, ImageDraw
from tqdm import tqdm
from omrdatasettools.Point2D import Point2D
from omrdatasettools.ExportPath import ExportPath
from omrdatasettools.Rectangle import Rectangle
class SimplePoint2D(object):
def __init__(self, x: float, y: float) -> None:
self.x = x
self.y = y
class CapitanSymbol:
def __init__(self, content: str, stroke: List[SimplePoint2D], image_data: numpy.ndarray, symbol_class: str,
dimensions: Rectangle) -> None:
super().__init__()
self.dimensions = dimensions
self.symbol_class = symbol_class
self.content = content
self.stroke = stroke
self.image_data = image_data
@staticmethod
def initialize_from_string(content: str) -> 'CapitanSymbol':
"""
Create and initializes a new symbol from a string
:param content: The content of a symbol as read from the text-file in the form <label>:<sequence>:<image>
:return: The initialized symbol
:rtype: CapitanSymbol
"""
        if content is None or content == "":
return None
parts = content.split(":")
min_x = 100000
max_x = 0
min_y = 100000
max_y = 0
symbol_name = parts[0]
sequence = parts[1]
image_numbers = parts[2].split(',')
image_data = numpy.asarray(image_numbers, numpy.uint8).reshape((30, 30))
stroke = []
for point_string in sequence.split(";"):
            if point_string == "":
continue # Skip the last element, that is due to a trailing ; in each line
point_x, point_y = point_string.split(",")
x = float(point_x)
y = float(point_y)
stroke.append(SimplePoint2D(x, y))
max_x = max(max_x, x)
min_x = min(min_x, x)
max_y = max(max_y, y)
min_y = min(min_y, y)
dimensions = Rectangle(Point2D(min_x, min_y), int(max_x - min_x + 1), int(max_y - min_y + 1))
return CapitanSymbol(content, stroke, image_data, symbol_name, dimensions)
def draw_capitan_score_bitmap(self, export_path: ExportPath) -> None:
"""
Draws the 30x30 symbol into the given file
:param export_path: The path, where the symbols should be created on disk
"""
with Image.fromarray(self.image_data, mode='L') as image:
image.save(export_path.get_full_path())
def draw_capitan_stroke_onto_canvas(self, export_path: ExportPath, stroke_thickness: int, margin: int):
"""
Draws the symbol strokes onto a canvas
:param export_path: The path, where the symbols should be created on disk
:param stroke_thickness:
:param margin:
"""
width = int(self.dimensions.width + 2 * margin)
height = int(self.dimensions.height + 2 * margin)
offset = Point2D(self.dimensions.origin.x - margin, self.dimensions.origin.y - margin)
image = Image.new('RGB', (width, height), "white") # create a new white image
draw = ImageDraw.Draw(image)
black = (0, 0, 0)
for i in range(0, len(self.stroke) - 1):
start_point = self.__subtract_offset(self.stroke[i], offset)
end_point = self.__subtract_offset(self.stroke[i + 1], offset)
distance = self.__euclidean_distance(start_point, end_point)
            if distance > 1600:  # squared distance > 40**2: the pen jumped more than 40 pixels, so do not connect these points with a line
continue
draw.line((start_point.x, start_point.y, end_point.x, end_point.y), black, stroke_thickness)
del draw
image.save(export_path.get_full_path())
image.close()
    @staticmethod
    def __euclidean_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:
        # Note: returns the *squared* euclidean distance; the square root is not needed for thresholding.
        return (a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y)
@staticmethod
def __manhatten_distance(a: SimplePoint2D, b: SimplePoint2D) -> float:
return abs(a.x - b.x) + abs(a.y - b.y)
@staticmethod
def __subtract_offset(a: SimplePoint2D, b: SimplePoint2D) -> SimplePoint2D:
return SimplePoint2D(a.x - b.x, a.y - b.y)
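# Illustrative sketch (not part of the original module): how a CapitanSymbol is typically built and
# rendered. Records have the form "<label>:<x1,y1;x2,y2;...;>:<900 comma-separated pixel values>";
# the label and output path below are hypothetical.
#
#   symbol = CapitanSymbol.initialize_from_string("clef.G:10,12;11,13;:" + ",".join(["0"] * 900))
#   symbol.draw_capitan_stroke_onto_canvas(
#       ExportPath("out", symbol.symbol_class, "example", "png", 3), stroke_thickness=3, margin=0)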
class CapitanImageGenerator:
def create_capitan_images(self, raw_data_directory: str,
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by parsing all text-files and the symbols as specified
by the parameters by drawing lines that connect the points from each stroke of each symbol.
:param raw_data_directory: The directory, that contains the raw capitan dataset
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
symbols = self.load_capitan_symbols(raw_data_directory)
self.draw_capitan_stroke_images(symbols, destination_directory, stroke_thicknesses)
self.draw_capitan_score_images(symbols, destination_directory)
def load_capitan_symbols(self, raw_data_directory: str) -> List[CapitanSymbol]:
data_path = os.path.join(raw_data_directory, "BimodalHandwrittenSymbols", "data")
with open(data_path) as file:
data = file.read()
symbol_strings = data.splitlines()
symbols = []
for symbol_string in tqdm(symbol_strings, desc="Loading symbols from strings"):
symbol = CapitanSymbol.initialize_from_string(symbol_string)
symbols.append(symbol)
return symbols
def draw_capitan_stroke_images(self, symbols: List[CapitanSymbol],
destination_directory: str,
stroke_thicknesses: List[int]) -> None:
"""
Creates a visual representation of the Capitan strokes by drawing lines that connect the points
from each stroke of each symbol.
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
:param stroke_thicknesses: The thickness of the pen, used for drawing the lines in pixels. If multiple are
specified, multiple images will be generated that have a different suffix, e.g.
1-16-3.png for the 3-px version and 1-16-2.png for the 2-px version of the image 1-16
"""
total_number_of_symbols = len(symbols) * len(stroke_thicknesses)
output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})".format(
total_number_of_symbols, len(symbols), len(stroke_thicknesses), stroke_thicknesses)
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering strokes")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-stroke".format(symbol.symbol_class,
capitan_file_name_counter)
for stroke_thickness in stroke_thicknesses:
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension,
'png', stroke_thickness)
symbol.draw_capitan_stroke_onto_canvas(export_path, stroke_thickness, 0)
progress_bar.update(1)
progress_bar.close()
def draw_capitan_score_images(self, symbols: List[CapitanSymbol],
destination_directory: str) -> None:
"""
Draws the image data contained in each symbol
:param symbols: The list of parsed Capitan-symbols
:param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per
symbol category will be generated automatically
        """
total_number_of_symbols = len(symbols)
output = "Generating {0} images from Capitan symbols".format(len(symbols))
print(output)
print("In directory {0}".format(os.path.abspath(destination_directory)), flush=True)
progress_bar = tqdm(total=total_number_of_symbols, mininterval=0.25, desc="Rendering images")
capitan_file_name_counter = 0
for symbol in symbols:
capitan_file_name_counter += 1
target_directory = os.path.join(destination_directory, symbol.symbol_class)
os.makedirs(target_directory, exist_ok=True)
raw_file_name_without_extension = "capitan-{0}-{1}-score".format(symbol.symbol_class,
capitan_file_name_counter)
export_path = ExportPath(destination_directory, symbol.symbol_class, raw_file_name_without_extension, 'png')
symbol.draw_capitan_score_bitmap(export_path)
progress_bar.update(1)
progress_bar.close()
@staticmethod
def add_arguments_for_homus_image_generator(parser: argparse.ArgumentParser):
parser.add_argument("-s", "--stroke_thicknesses", dest="stroke_thicknesses", default="3",
help="Stroke thicknesses for drawing the generated bitmaps. May define comma-separated list"
" of multiple stroke thicknesses, e.g. '1,2,3'")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--raw_dataset_directory",
type=str,
default="../data/capitan_raw",
help="The directory, where the raw HOMUS dataset can be found (the text-files that contain the strokes)")
parser.add_argument(
"--image_dataset_directory",
type=str,
default="../data/images",
help="The directory, where the generated bitmaps will be created")
image_generator = CapitanImageGenerator()
image_generator.add_arguments_for_homus_image_generator(parser)
flags, unparsed = parser.parse_known_args()
image_generator.create_capitan_images(flags.raw_dataset_directory, flags.image_dataset_directory,
[int(s) for s in flags.stroke_thicknesses.split(',')])
|
47e483eec18ded57f36b3c4f03bf5f7dae8b311f
|
ef44147ba4e396987d165d95728c3d9acd531ec0
|
/.travis/refresh_license.py
|
bcd4888f4db2e1715782c51dc5f7a378a549e3ad
|
[
"MIT"
] |
permissive
|
lexus2k/lcdgfx
|
e92e5832e883067d6411cb461cb8d4cf6e05d9f2
|
0f772e5a90b559d3b24b3f2a6239ac76bc038cce
|
refs/heads/master
| 2023-08-30T11:05:06.476852
| 2023-08-19T08:59:40
| 2023-08-19T08:59:40
| 204,888,210
| 307
| 49
|
MIT
| 2023-07-04T14:57:24
| 2019-08-28T08:49:49
|
C++
|
UTF-8
|
Python
| false
| false
| 6,428
|
py
|
refresh_license.py
|
import os
import datetime
import hashlib
exclude_paths = [
'./.git',
'./.github',
'./.travis',
'./tools/templates',
]
include_files = [
'.cpp',
'.c',
'.inl',
'.py',
'.h',
]
code = []
hash_db = {}
def get_hash(data):
hash_md5 = hashlib.md5()
hash_md5.update(data.encode())
return hash_md5.hexdigest()
def open_hash_db(name):
if not os.path.exists(name):
return False
with open(name) as f:
lines = f.readlines()
for l in lines:
k = l.split(':')[0]
v = l.split(':')[1].strip()
hash_db[k] = v
return True
def save_hash_db(name):
with open(name, "w") as f:
for v in hash_db.keys():
f.write(v + ":" + hash_db[v] + "\n")
return True
def update_hash_in_db(name, h):
hash_db[name] = h
def get_hash_from_db(name):
if name in hash_db:
return hash_db[name]
return None
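# The hash database is a plain text file with one "path:md5hex" entry per line, e.g. (illustrative):
#   ./src/main.cpp:0123456789abcdef0123456789abcdef
# It is used to decide whether a file changed since the last run; changed files get their copyright
# year refreshed, unchanged files that already carry a copyright header are skipped.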
def is_file_modified(name):
with open(name) as f:
lines = f.readlines()
# Calculate hash for the file
new_hash = str(get_hash(''.join(lines)))
old_hash = get_hash_from_db(name)
if old_hash is not None:
if old_hash != new_hash:
file_modified = True
else:
file_modified = False
else:
file_modified = False
return (file_modified, new_hash)
def check_and_update_license(name, new_license = False):
with open(name) as f:
lines = f.readlines()
(file_modified, new_hash) = is_file_modified(name)
copyright_exists = False
for i in range(min(10,len(lines))):
if "Copyright" in lines[i]:
now = datetime.datetime.now()
# Copyright 2022 (C) Alexey Dynda
ccc = "(C)"
if "(c)" in lines[i]:
ccc = "(c)"
if "Copyright {}".format(ccc) in lines[i]:
YEAR = lines[i].split("Copyright {}".format(ccc))[1].split(',')[0].strip()
else:
YEAR = lines[i].split("Copyright")[1].split(ccc)[0].strip()
if YEAR.endswith(','):
YEAR = YEAR[:-1]
if not YEAR.endswith("{}".format(now.year)):
if YEAR.endswith("-{}".format(now.year-1)):
YEAR = YEAR[:-5]
YEAR += "-{}".format(now.year)
elif YEAR.endswith("{}".format(now.year-1)):
YEAR += "-{}".format(now.year)
else:
YEAR += ",{}".format(now.year)
copyright_exists = True
insert_position = 0
if not copyright_exists:
now = datetime.datetime.now()
YEAR = "{}".format(now.year)
else:
# Cut old license
start = -1
end = -1
for i in range(len(lines) - 1):
if start < 0 and i > 10:
copyright_exists = False
break
if start < 0 and (lines[i] == "/*\n" or lines[i] == "#\n" or lines[i] == "\"\"\"\n"):
start = i
continue
if start >= 0 and start < 6 and (lines[i] == "*/\n" or lines[i] == " */\n" or
(lines[i] == "#\n" and lines[i+1] == "\n") or
(lines[i] == "\"\"\"\n" and lines[i+1] == "\n")):
end = i
break
if start >= 0 and end >=0:
temp = []
if start > 0:
temp = lines[:start-1]
temp.extend(lines[end+1:])
lines = temp
insert_position = start
if copyright_exists and not file_modified and not new_license:
# We do not need to modify the license
update_hash_in_db(name, new_hash)
return
if not copyright_exists and name.endswith(".py") and len(lines) > 0 and lines[0].startswith('#'):
insert_position = 1
license = [e + '\n' for e in TEXT.format("Copyright {} (C) Alexey Dynda".format(YEAR)).split('\n')]
if not copyright_exists:
license.append("\n")
if name.endswith(".py"):
for i in range(len(license)):
if license[i] != "\n":
license[i] = " " + license[i]
license.insert(0, "\"\"\"\n")
license.append("\"\"\"\n")
if not copyright_exists:
license.append("\n")
else:
for i in range(len(license)):
if license[i] != "\n":
license[i] = " " + license[i]
license.insert(0, "/*\n")
license.append("*/\n")
if not copyright_exists:
license.append("\n")
for t in reversed(license):
lines.insert(insert_position,t)
with open(name, "w") as f:
for l in lines:
f.write(l)
new_hash = str(get_hash(''.join(lines)))
update_hash_in_db(name, new_hash)
# print(''.join(lines))
for root, dirs, files in os.walk("."):
path = root.split(os.sep)
skip = False
for d in exclude_paths:
if root.startswith( d ):
skip = True
if not skip:
for file in files:
for f in include_files:
p = root + '/' + file
if p.endswith( f ):
code.append( p )
break
TEXT = ''' MIT License
{}
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
open_hash_db('.hash_db')
for c in code:
# print(c)
check_and_update_license(c, False)
save_hash_db('.hash_db')
|
cd035ce301827d37f8fbeb6e76d984eb14d1788e
|
78e9d3d12e96fd37817e510b7b272d0d08ec4053
|
/models/networks.py
|
6be74fb043df12a223771c681e40023f83ed39ba
|
[] |
no_license
|
ly015/intrinsic_flow
|
15e3926ccc269d3f37d37ceb74d1a3b2ff41823c
|
94ea8f0b6c2e9d6380a29055eaa5b0068e894a25
|
refs/heads/master
| 2022-01-31T05:58:35.091723
| 2022-01-17T03:58:21
| 2022-01-17T03:58:21
| 178,152,311
| 158
| 26
| null | 2022-01-17T03:58:22
| 2019-03-28T07:45:22
|
Python
|
UTF-8
|
Python
| false
| false
| 34,715
|
py
|
networks.py
|
from __future__ import division
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import os
import numpy as np
from modules import *
from skimage.measure import compare_ssim, compare_psnr
import functools
##############################################
# Image generation networks: Unet and DualUnet
##############################################
def conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, bias=False, norm_layer=nn.BatchNorm2d):
model = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias),
norm_layer(out_channels),
)
return model
def channel_mapping(in_channels, out_channels, norm_layer=nn.BatchNorm2d, bias=False):
return conv(in_channels, out_channels, kernel_size=1, norm_layer=norm_layer, bias=bias)
class ResidualBlock(nn.Module):
'''
Derived from Variational UNet.
'''
def __init__(self, dim, dim_a, norm_layer=nn.BatchNorm2d, use_bias=False, activation=nn.ReLU(False), use_dropout=False, no_end_norm=False):
super(ResidualBlock, self).__init__()
self.use_dropout = use_dropout
self.activation = activation
        if dim_a is None or dim_a <= 0:  # check for None first to avoid comparing None with 0
# w/o additional input
if no_end_norm:
self.conv = conv(in_channels=dim, out_channels=dim, kernel_size=3, padding=1, norm_layer=Identity, bias=True)
else:
self.conv = conv(in_channels=dim, out_channels=dim, kernel_size=3, padding=1, norm_layer=norm_layer, bias=use_bias)
else:
# w/ additional input
self.conv_a = channel_mapping(in_channels=dim_a, out_channels=dim,norm_layer=norm_layer, bias=use_bias)
if no_end_norm:
self.conv = conv(in_channels=dim*2, out_channels=dim, kernel_size=3, padding=1, norm_layer=Identity, bias=True)
else:
self.conv = conv(in_channels=dim*2, out_channels=dim, kernel_size=3, padding=1, norm_layer=norm_layer, bias=use_bias)
def forward(self, x, a=None):
if a is None:
# w/o additional input
residual = x
else:
# w/ additional input
a = self.conv_a(self.activation(a))
residual = torch.cat((x, a), dim=1)
residual = self.conv(self.activation(residual))
out = x + residual
if self.use_dropout:
out = F.dropout(out, p=0.5, training=self.training)
return out
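# Note on ResidualBlock: when dim_a is None or <= 0 it is a plain pre-activation residual block
# (out = x + conv(act(x))); when dim_a > 0 the auxiliary input `a` is first mapped to `dim` channels
# and concatenated with x before the 3x3 convolution. This second mode is how skip connections from
# the encoders are merged into the decoders below.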
class GateBlock(nn.Module):
    def __init__(self, dim, dim_a, activation=nn.ReLU(False)):
super(GateBlock, self).__init__()
self.activation = activation
self.conv = nn.Conv2d(in_channels=dim_a, out_channels=dim, kernel_size=1)
def forward(self, x, a):
'''
x: (bsz, dim, h, w)
a: (bsz, dim_a, h, w)
'''
a = self.activation(a)
g = F.sigmoid(self.conv(a))
return x*g
class UnetGenerator(nn.Module):
'''
A variation of Unet that use residual blocks instead of convolution layer at each scale
'''
def __init__(self, input_nc, output_nc, nf=64, max_nf=256, num_scales=7, n_residual_blocks=2, norm='batch', activation=nn.ReLU(False), use_dropout=False, gpu_ids=[]):
super(UnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.nf = nf
self.max_nf = max_nf
self.num_scales = num_scales
self.n_residual_blocks = n_residual_blocks
self.norm = norm
self.gpu_ids = gpu_ids
self.use_dropout = use_dropout
if norm == 'batch':
norm_layer = nn.BatchNorm2d
use_bias = False
elif norm == 'instance':
norm_layer = nn.InstanceNorm2d
use_bias = True
else:
raise NotImplementedError()
self.pre_conv = channel_mapping(input_nc, nf, norm_layer, use_bias)
for l in range(num_scales):
c_in = min(nf * (l+1), max_nf)
c_out = min(nf * (l+2), max_nf)
# encoding layers
for i in range(n_residual_blocks):
self.__setattr__('enc_%d_res_%d'%(l, i), ResidualBlock(c_in, None, norm_layer, use_bias, activation, use_dropout=False))
downsample = nn.Sequential(
activation,
nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(c_out)
)
self.__setattr__('enc_%d_downsample'%l, downsample)
# decoding layers
upsample = nn.Sequential(
activation,
nn.Conv2d(c_out, c_in*4, kernel_size=3, padding=1, bias=use_bias),
nn.PixelShuffle(2),
norm_layer(c_in)
)
self.__setattr__('dec_%d_upsample'%l, upsample)
for i in range(n_residual_blocks):
self.__setattr__('dec_%d_res_%d'%(l, i), ResidualBlock(c_in, c_in, norm_layer, use_bias, activation, use_dropout))
self.dec_output = nn.Sequential(
nn.ReflectionPad2d(3),
nn.Conv2d(nf, output_nc, kernel_size=7, padding=0, bias=True)
)
def forward(self, x, single_device=False):
if len(self.gpu_ids) > 1 and (not single_device):
return nn.parallel.data_parallel(self, x, module_kwargs={'single_device':True})
else:
hiddens = []
x = self.pre_conv(x)
# encode
for l in range(self.num_scales):
for i in range(self.n_residual_blocks):
x = self.__getattr__('enc_%d_res_%d'%(l,i))(x)
hiddens.append(x)
x = self.__getattr__('enc_%d_downsample'%l)(x)
# decode
for l in range(self.num_scales-1,-1,-1):
x = self.__getattr__('dec_%d_upsample'%l)(x)
for i in range(self.n_residual_blocks-1,-1,-1):
h = hiddens.pop()
x = self.__getattr__('dec_%d_res_%d'%(l, i))(x,h)
out = self.dec_output(x)
return out
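# Illustrative usage sketch (not part of the original file); the channel counts and spatial size are
# assumptions chosen for the example, not values prescribed by this repository:
#
#   net = UnetGenerator(input_nc=21, output_nc=3, nf=64, num_scales=7)
#   x = torch.randn(1, 21, 256, 256)    # spatial size must be divisible by 2**num_scales (128 here)
#   y = net(x)                          # -> (1, 3, 256, 256)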
class UnetGenerator_MultiOutput(nn.Module):
'''
A variation of UnetGenerator that support multiple output branches
'''
def __init__(self, input_nc, output_nc = [3], nf=64, max_nf=256, num_scales=7, n_residual_blocks=2, norm='batch', activation=nn.ReLU(False), use_dropout=False, gpu_ids=[]):
super(UnetGenerator_MultiOutput, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc if isinstance(output_nc, list) else [output_nc]
self.nf = nf
self.max_nf = max_nf
self.num_scales = num_scales
self.n_residual_blocks = n_residual_blocks
self.norm = norm
self.gpu_ids = gpu_ids
self.use_dropout = use_dropout
if norm == 'batch':
norm_layer = nn.BatchNorm2d
use_bias = False
elif norm == 'instance':
norm_layer = nn.InstanceNorm2d
use_bias = True
else:
raise NotImplementedError()
self.pre_conv = channel_mapping(input_nc, nf, norm_layer, use_bias)
for l in range(num_scales):
c_in = min(nf * (l+1), max_nf)
c_out = min(nf * (l+2), max_nf)
# encoding layers
for i in range(n_residual_blocks):
self.__setattr__('enc_%d_res_%d'%(l, i), ResidualBlock(c_in, None, norm_layer, use_bias, activation, use_dropout=False))
downsample = nn.Sequential(
activation,
nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(c_out)
)
self.__setattr__('enc_%d_downsample'%l, downsample)
# decoding layers
upsample = nn.Sequential(
activation,
nn.Conv2d(c_out, c_in*4, kernel_size=3, padding=1, bias=use_bias),
nn.PixelShuffle(2),
norm_layer(c_in)
)
self.__setattr__('dec_%d_upsample'%l, upsample)
for i in range(n_residual_blocks):
self.__setattr__('dec_%d_res_%d'%(l, i), ResidualBlock(c_in, c_in, norm_layer, use_bias, activation, use_dropout))
for i, c_out in enumerate(output_nc):
dec_output_i = nn.Sequential(
channel_mapping(nf, nf, norm_layer, use_bias),
activation,
nn.ReflectionPad2d(3),
nn.Conv2d(nf, c_out, kernel_size=7, padding=0, bias=True)
)
self.__setattr__('dec_output_%d'%i, dec_output_i)
def forward(self, x, single_device=False):
if len(self.gpu_ids) > 1 and (not single_device):
return nn.parallel.data_parallel(self, x, module_kwargs={'single_device':True})
else:
hiddens = []
x = self.pre_conv(x)
# encode
for l in range(self.num_scales):
for i in range(self.n_residual_blocks):
x = self.__getattr__('enc_%d_res_%d'%(l,i))(x)
hiddens.append(x)
x = self.__getattr__('enc_%d_downsample'%l)(x)
# decode
for l in range(self.num_scales-1,-1,-1):
x = self.__getattr__('dec_%d_upsample'%l)(x)
for i in range(self.n_residual_blocks-1,-1,-1):
h = hiddens.pop()
x = self.__getattr__('dec_%d_res_%d'%(l, i))(x,h)
out = []
for i in range(len(self.output_nc)):
out.append(self.__getattr__('dec_output_%d'%i)(x))
return out
class DualUnetGenerator(nn.Module):
'''
A variation of Unet architecture, similar to deformable gan. It contains two encoders: one for target pose and one for appearance. The feature map of appearance encoder will be warped to target pose, guided
by input flow. There are skip connections from both encoders to the decoder.
'''
def __init__(self, pose_nc, appearance_nc, output_nc, aux_output_nc=[], nf=32, max_nf=128, num_scales=7, num_warp_scales=5, n_residual_blocks=2, norm='batch', vis_mode='none', activation=nn.ReLU(False), use_dropout=False, no_end_norm=False, gpu_ids=[]):
'''
vis_mode: ['none', 'hard_gate', 'soft_gate', 'residual']
no_end_norm: remove normalization layer at the start and the end.
'''
super(DualUnetGenerator, self).__init__()
self.pose_nc = pose_nc
self.appearance_nc = appearance_nc
self.output_nc = output_nc
self.nf = nf
self.max_nf = max_nf
self.num_scales = num_scales
self.num_warp_scales = num_warp_scales # at higher scales, warping will not be applied because the resolution of the feature map is too small
self.n_residual_blocks = n_residual_blocks
self.norm = norm
self.gpu_ids = gpu_ids
self.use_dropout = use_dropout
self.vis_mode = vis_mode
self.vis_expand_mult = 2 # expanded multiple when perform vis_expand
self.aux_output_nc = aux_output_nc
self.no_end_norm = no_end_norm
if norm == 'batch':
norm_layer = nn.BatchNorm2d
use_bias = False
elif norm == 'instance':
norm_layer = nn.InstanceNorm2d
use_bias = True
else:
raise NotImplementedError()
####################################
# input encoder
####################################
if not no_end_norm:
self.encp_pre_conv = channel_mapping(pose_nc, nf, norm_layer, use_bias)
self.enca_pre_conv = channel_mapping(appearance_nc, nf, norm_layer, use_bias)
else:
self.encp_pre_conv = channel_mapping(pose_nc, nf, Identity, True)
self.enca_pre_conv = channel_mapping(appearance_nc, nf, Identity, True)
for l in range(num_scales):
c_in = min(nf * (l+1), max_nf)
c_out = min(nf * (l+2), max_nf)
####################################
# pose encoder
####################################
# resblocks
for i in range(n_residual_blocks):
self.__setattr__('encp_%d_res_%d'%(l, i), ResidualBlock(c_in, None, norm_layer, use_bias, activation, use_dropout=False))
# down sample
p_downsample = nn.Sequential(
activation,
nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(c_out)
)
self.__setattr__('encp_%d_downsample'%l, p_downsample)
####################################
# appearance encoder
####################################
for i in range(n_residual_blocks):
# resblocks
self.__setattr__('enca_%d_res_%d'%(l, i), ResidualBlock(c_in, None, norm_layer, use_bias, activation, use_dropout=False))
# visibility gating
if l < num_warp_scales:
if vis_mode == 'hard_gate':
pass
elif vis_mode == 'soft_gate':
self.__setattr__('enca_%d_vis_%d'%(l, i), GateBlock(c_in, c_in*self.vis_expand_mult, activation))
elif vis_mode == 'residual':
self.__setattr__('enca_%d_vis_%d'%(l, i), ResidualBlock(c_in, c_in*self.vis_expand_mult, norm_layer, use_bias, activation, use_dropout=False))
elif vis_mode == 'res_no_vis':
self.__setattr__('enca_%d_vis_%d'%(l, i), ResidualBlock(c_in, None, norm_layer, use_bias, activation, use_dropout=False))
# down sample
a_downsample = nn.Sequential(
activation,
nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(c_out)
)
            self.__setattr__('enca_%d_downsample'%l, a_downsample)
####################################
# decoder
####################################
# resblocks
if l == num_scales-1:
self.dec_fuse = channel_mapping(c_out*2, c_out, norm_layer, use_bias) # a fusion layer at the bottle neck
# upsample
upsample = nn.Sequential(
activation,
nn.Conv2d(c_out, c_in*4, kernel_size=3, padding=1, bias=use_bias),
nn.PixelShuffle(2),
norm_layer(c_in)
)
self.__setattr__('dec_%d_upsample'%l, upsample)
for i in range(n_residual_blocks):
if l == num_scales-1 and i == n_residual_blocks-1:
self.__setattr__('dec_%d_res_%d'%(l,i), ResidualBlock(c_in, c_in*2, norm_layer, use_bias, activation, use_dropout, no_end_norm=no_end_norm))
else:
self.__setattr__('dec_%d_res_%d'%(l,i), ResidualBlock(c_in, c_in*2, norm_layer, use_bias, activation, use_dropout))
####################################
# output decoder
####################################
self.dec_output = nn.Sequential(
nn.ReflectionPad2d(3),
nn.Conv2d(nf, output_nc, kernel_size=7, padding=0, bias=True)
)
for i, a_nc in enumerate(aux_output_nc):
dec_aux_output = nn.Sequential(
nn.ReflectionPad2d(3),
nn.Conv2d(nf, a_nc, kernel_size=7, padding=0, bias=True)
)
self.__setattr__('dec_aux_output_%d'%i, dec_aux_output)
def _vis_expand(self, feat, vis):
'''
expand feature from n channels to n*vis_expand_mult channels
'''
feat_exp = [feat*(vis==i).float() for i in range(self.vis_expand_mult)]
return torch.cat(feat_exp, dim=1)
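    # With vis_expand_mult == 2 the warped features are split into a "visible" copy (vis == 0) and an
    # "invisible" copy (vis == 1); background pixels (vis == 2) are zeroed in both copies, so the
    # following gate/residual block can treat the three visibility cases differently.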
def forward(self, x_p, x_a, flow=None, vis=None, output_feats=False, single_device=False):
'''
x_p: (bsz, pose_nc, h, w), pose input
x_a: (bsz, appearance_nc, h, w), appearance input
vis: (bsz, 1, h, w), 0-visible, 1-invisible, 2-background
flow: (bsz, 2, h, w) or None. if flow==None, feature warping will not be performed
'''
if len(self.gpu_ids)>1 and (not single_device):
if flow is not None:
assert vis is not None
return nn.parallel.data_parallel(self, (x_p, x_a, flow, vis), module_kwargs={'single_device':True, 'output_feats':output_feats})
else:
return nn.parallel.data_parallel(self, (x_p, x_a), module_kwargs={'flow':None, 'vis':None, 'single_device':True, 'output_feats':output_feats})
else:
use_fw = flow is not None
if use_fw:
vis = vis.round()
hidden_p = []
hidden_a = []
# encoding p
x_p = self.encp_pre_conv(x_p)
for l in range(self.num_scales):
for i in range(self.n_residual_blocks):
x_p = self.__getattr__('encp_%d_res_%d'%(l,i))(x_p)
hidden_p.append(x_p)
x_p = self.__getattr__('encp_%d_downsample'%l)(x_p)
# encoding a
x_a = self.enca_pre_conv(x_a)
for l in range(self.num_scales):
for i in range(self.n_residual_blocks):
x_a = self.__getattr__('enca_%d_res_%d'%(l,i))(x_a)
# feature warping
if use_fw and l < self.num_warp_scales:
if i == 0: # compute flow and vis once at each scale
flow_l = F.avg_pool2d(flow, kernel_size=2**l).div_(2**l) if l > 0 else flow
                            vis_l = -F.max_pool2d(-vis, kernel_size=2**l) if l > 0 else vis  # the priority is visible > invisible > background
x_w = warp_acc_flow(x_a, flow_l)
if self.vis_mode == 'none':
pass
elif self.vis_mode == 'hard_gate':
x_w = x_w * (vis_l<2).float()
elif self.vis_mode == 'soft_gate':
x_we = self._vis_expand(x_w, vis_l)
x_w = self.__getattr__('enca_%d_vis_%d'%(l, i))(x_w, x_we)
elif self.vis_mode == 'residual':
x_we = self._vis_expand(x_w, vis_l)
x_w = self.__getattr__('enca_%d_vis_%d'%(l, i))(x_w, x_we)
elif self.vis_mode == 'res_no_vis':
x_w = self.__getattr__('enca_%d_vis_%d'%(l, i))(x_w)
hidden_a.append(x_w)
else:
hidden_a.append(x_a)
x_a = self.__getattr__('enca_%d_downsample'%l)(x_a)
# bottleneck fusion
x = self.dec_fuse(torch.cat((x_p, x_a), dim=1))
feats = [x]
# decoding
for l in range(self.num_scales-1, -1, -1):
x = self.__getattr__('dec_%d_upsample'%l)(x)
feats = [x] + feats
for i in range(self.n_residual_blocks-1, -1, -1):
h_p = hidden_p.pop()
h_a = hidden_a.pop()
x = self.__getattr__('dec_%d_res_%d'%(l, i))(x, torch.cat((h_p, h_a), dim=1))
out = self.dec_output(x)
if self.aux_output_nc or output_feats:
aux_out = []
if self.aux_output_nc:
for i in range(len(self.aux_output_nc)):
aux_out.append(self.__getattr__('dec_aux_output_%d'%i)(x))
if output_feats:
aux_out.append(feats)
return out, aux_out
else:
return out
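# Illustrative usage sketch (not part of the original file); channel counts and sizes are assumptions
# chosen for the example:
#
#   G = DualUnetGenerator(pose_nc=18, appearance_nc=3, output_nc=3, nf=32, num_scales=7)
#   x_p = torch.randn(1, 18, 256, 256)       # target pose representation
#   x_a = torch.randn(1, 3, 256, 256)        # reference appearance image
#   flow = torch.randn(1, 2, 256, 256)       # appearance -> target flow (optional)
#   vis = torch.randint(0, 3, (1, 1, 256, 256)).float()
#   out = G(x_p, x_a, flow=flow, vis=vis)    # -> (1, 3, 256, 256)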
class UnetDecoder(nn.Module):
'''
Decoder that decodes hierarachical features. Support multi-task output. Used as an external decoder of a DualUnetGenerator network
'''
def __init__(self, output_nc=[], nf=32, max_nf=128, num_scales=7, n_residual_blocks=2, norm='batch', activation=nn.ReLU(False), gpu_ids=[]):
super(UnetDecoder, self).__init__()
output_nc = output_nc if isinstance(output_nc, list) else [output_nc]
self.output_nc = output_nc
self.nf = nf
self.max_nf = max_nf
self.num_scales = num_scales
self.n_residual_blocks = n_residual_blocks
self.norm = norm
self.gpu_ids = gpu_ids
if norm == 'batch':
norm_layer = nn.BatchNorm2d
use_bias = False
elif norm == 'instance':
norm_layer = nn.InstanceNorm2d
use_bias = True
else:
raise NotImplementedError()
####################
# hierarchical decoding layers
####################
for l in range(num_scales):
c_in = min(nf*(l+1), max_nf) # lower feature dim
c_out = min(nf*(l+2), max_nf) # higher feature dim
upsample = nn.Sequential(
activation,
nn.Conv2d(c_out, c_in*4, kernel_size=3, padding=1, bias=use_bias),
nn.PixelShuffle(2),
norm_layer(c_in)
)
self.__setattr__('dec_%d_upsample'%l, upsample)
for i in range(n_residual_blocks):
self.__setattr__('dec_%d_res_%d'%(l,i), ResidualBlock(c_in, c_in if i==0 else None, norm_layer, use_bias, activation))
####################
# output decoders
####################
for i, c_out in enumerate(output_nc):
dec_output_i = nn.Sequential(
channel_mapping(nf, nf, norm_layer, use_bias),
activation,
nn.ReflectionPad2d(3),
nn.Conv2d(nf, c_out, kernel_size=7)
)
self.__setattr__('dec_output_%d'%i, dec_output_i)
def forward(self, feats, single_device=False):
if len(self.gpu_ids) > 1 and (not single_device):
            return nn.parallel.data_parallel(self, feats, module_kwargs={'single_device':True})
else:
x, hiddens = feats[-1], feats[:-1]
# decode
for l in range(self.num_scales-1, -1, -1):
x = self.__getattr__('dec_%d_upsample'%l)(x)
for i in range(self.n_residual_blocks):
if i == 0:
h = hiddens.pop()
x = self.__getattr__('dec_%d_res_%d'%(l,i))(x,h)
else:
x = self.__getattr__('dec_%d_res_%d'%(l,i))(x)
out = []
for i in range(len(self.output_nc)):
out.append(self.__getattr__('dec_output_%d'%i)(x))
return out
##############################################
# Flow networks
##############################################
class FlowUnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d):
super(FlowUnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
self.innermost = innermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv, downnorm]
up = [uprelu, upconv, upnorm]
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
self.down = nn.Sequential(*down)
self.up = nn.Sequential(*up)
self.submodule = submodule
self.predict_flow = nn.Sequential(
nn.LeakyReLU(0.1),
nn.Conv2d(outer_nc, 2, kernel_size=3, stride=1, padding=1))
def forward(self, x):
if self.outermost:
x_ = self.down(x)
x_, x_pyramid, flow_pyramid = self.submodule(x_)
x_ = self.up(x_)
x_out = x_
elif self.innermost:
x_pyramid = []
flow_pyramid = []
x_ = self.up(self.down(x))
x_out = torch.cat((x, x_), dim=1)
else:
x_ = self.down(x)
x_, x_pyramid, flow_pyramid = self.submodule(x_)
x_ = self.up(x_)
x_out = torch.cat((x, x_), dim=1)
flow = self.predict_flow(x_)
x_pyramid = [x_] + x_pyramid
flow_pyramid = [flow] + flow_pyramid
return x_out, x_pyramid, flow_pyramid
class FlowUnet(nn.Module):
def __init__(self, input_nc, nf=16, start_scale=2, num_scale=5, norm='batch', gpu_ids=[], max_nf=512):
super(FlowUnet, self).__init__()
self.gpu_ids = gpu_ids
self.nf = nf
self.norm = norm
        self.start_scale = start_scale
        self.num_scale = num_scale
if norm == 'batch':
norm_layer = nn.BatchNorm2d
use_bias = False
elif norm == 'instance':
norm_layer = nn.InstanceNorm2d
use_bias = True
else:
raise NotImplementedError()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
conv_downsample = [
nn.Conv2d(input_nc, nf, kernel_size=7, padding=3, bias=use_bias),
norm_layer(nf),
nn.LeakyReLU(0.1)]
nc = nf
        for i in range(int(np.log2(start_scale))):
conv_downsample += [
nn.Conv2d(nc, 2*nc, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(2*nc),
nn.LeakyReLU(0.1)
]
nc = nc*2
self.conv_downsample = nn.Sequential(*conv_downsample)
unet_block = None
for l in range(num_scale)[::-1]:
outer_nc = min(max_nf, nc*2**l)
inner_nc = min(max_nf, nc*2**(l+1))
innermost = (l==num_scale-1)
outermost = (l==0)
unet_block = FlowUnetSkipConnectionBlock(outer_nc, inner_nc, input_nc=None, submodule=unet_block, norm_layer=norm_layer, innermost=innermost, outermost=outermost)
self.unet_block = unet_block
self.nf_out = min(max_nf, nc)
self.predict_vis = nn.Sequential(
nn.LeakyReLU(0.1),
nn.Conv2d(min(max_nf, nc), 3, kernel_size=3, stride=1, padding=1)
)
def forward(self, input, single_device=False):
if len(self.gpu_ids) > 1 and (not single_device):
return nn.parallel.data_parallel(self, input, module_kwargs={'single_device': True})
else:
x = self.conv_downsample(input)
feat_out, x_pyr, flow_pyr = self.unet_block(x)
vis = self.predict_vis(feat_out)
flow_out = F.upsample(flow_pyr[0], scale_factor=self.start_scale, mode='bilinear', align_corners=False)
vis = F.upsample(vis, scale_factor=self.start_scale, mode='bilinear', align_corners=False)
return flow_out, vis, flow_pyr, feat_out
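# Note on FlowUnet outputs: the finest-level flow prediction and the 3-channel visibility map are
# spatially upsampled by `start_scale` so they match the network input resolution; `flow_pyr` keeps
# the intermediate per-scale flow predictions and `feat_out` the last decoder feature map.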
class FlowUnet_v2(nn.Module):
'''
A variation of Unet that use residual blocks instead of convolution layer at each scale
'''
def __init__(self, input_nc, nf=64, max_nf=256, start_scale=2, num_scales=7, n_residual_blocks=2, norm='batch', activation=nn.ReLU(False), use_dropout=False, gpu_ids=[]):
super(FlowUnet_v2, self).__init__()
self.input_nc = input_nc
self.nf = nf
self.max_nf = max_nf
self.start_scale = start_scale
self.num_scales = num_scales
self.n_residual_blocks = n_residual_blocks
self.norm = norm
self.gpu_ids = gpu_ids
self.use_dropout = use_dropout
if norm == 'batch':
norm_layer = nn.BatchNorm2d
use_bias = False
elif norm == 'instance':
norm_layer = nn.InstanceNorm2d
use_bias = True
else:
raise NotImplementedError()
        start_level = int(np.log2(start_scale))
pre_conv = [channel_mapping(input_nc, nf, norm_layer, use_bias)]
for i in range(start_level):
c_in = min(nf*(i+1), max_nf)
c_out = min(nf*(i+2), max_nf)
pre_conv += [
ResidualBlock(c_in, None, norm_layer, use_bias, activation, use_dropout=use_dropout),
activation,
nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(c_out)
]
self.pre_conv = nn.Sequential(*pre_conv)
for l in range(num_scales):
c_in = min(nf * (start_level+l+1), max_nf)
c_out = min(nf * (start_level+l+2), max_nf)
# encoding layers
for i in range(n_residual_blocks):
self.__setattr__('enc_%d_res_%d'%(l, i), ResidualBlock(c_in, None, norm_layer, use_bias, activation, use_dropout=use_dropout))
downsample = nn.Sequential(
activation,
nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(c_out)
)
self.__setattr__('enc_%d_downsample'%l, downsample)
# decoding layers
upsample = nn.Sequential(
activation,
nn.Conv2d(c_out, c_in*4, kernel_size=3, padding=1, bias=use_bias),
nn.PixelShuffle(2),
norm_layer(c_in)
)
self.__setattr__('dec_%d_upsample'%l, upsample)
for i in range(n_residual_blocks):
self.__setattr__('dec_%d_res_%d'%(l, i), ResidualBlock(c_in, c_in, norm_layer, use_bias, activation, use_dropout))
# flow prediction
pred_flow = nn.Sequential(
activation,
nn.Conv2d(c_in, 2, kernel_size=3, padding=1, bias=True)
)
self.__setattr__('pred_flow_%d'%l, pred_flow)
# vis prediction
self.pred_vis = nn.Sequential(
activation,
nn.Conv2d(nf*(1+start_level), 3, kernel_size=3, padding=1, bias=True)
)
def forward(self, x, single_device=False):
if len(self.gpu_ids) > 1 and (not single_device):
return nn.parallel.data_parallel(self, x, module_kwargs={'single_device':True})
else:
hiddens = []
flow_pyr = []
x = self.pre_conv(x)
# encode
for l in range(self.num_scales):
for i in range(self.n_residual_blocks):
x = self.__getattr__('enc_%d_res_%d'%(l,i))(x)
hiddens.append(x)
x = self.__getattr__('enc_%d_downsample'%l)(x)
# decode
for l in range(self.num_scales-1,-1,-1):
x = self.__getattr__('dec_%d_upsample'%l)(x)
for i in range(self.n_residual_blocks-1,-1,-1):
h = hiddens.pop()
x = self.__getattr__('dec_%d_res_%d'%(l, i))(x,h)
flow_pyr = [self.__getattr__('pred_flow_%d'%l)(x)] + flow_pyr
feat_out = x
flow_out = F.upsample(flow_pyr[0], scale_factor=self.start_scale, mode='bilinear', align_corners=False)
vis_out = F.upsample(self.pred_vis(x), scale_factor=self.start_scale, mode='bilinear', align_corners=False)
return flow_out, vis_out, flow_pyr, feat_out
##############################################
# Discriminator network
##############################################
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, output_bias = True, gpu_ids=[]):
super(NLayerDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw, bias = output_bias)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input)
else:
return self.model(input)
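# Note: NLayerDiscriminator is the standard PatchGAN-style discriminator (as popularised by pix2pix):
# it outputs a spatial map of real/fake scores rather than a single scalar, so each output value
# judges one local patch of the input.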
|
1ee4aa351b0fdf6ec648c54f57361fc024d62e5d
|
7e1c4dd6a2cae0597b4f4e961063cf077acdfd4c
|
/couchbase/collection.py
|
2ff547f0bdc6941a03f9975289b23d35f0a05f71
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
couchbase/couchbase-python-client
|
753fa434db910d175bf9ea53a5829a40ba36e938
|
c7d80434be3f917d6f25439a918aed30273f63f4
|
refs/heads/master
| 2023-08-29T14:04:13.532717
| 2023-08-24T22:53:30
| 2023-08-25T03:35:21
| 2,122,194
| 223
| 87
|
Apache-2.0
| 2023-05-30T16:05:59
| 2011-07-29T04:24:46
|
Python
|
UTF-8
|
Python
| false
| false
| 118,216
|
py
|
collection.py
|
# Copyright 2016-2022. Couchbase, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from copy import copy
from typing import (TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Tuple,
Union)
from couchbase.binary_collection import BinaryCollection
from couchbase.datastructures import (CouchbaseList,
CouchbaseMap,
CouchbaseQueue,
CouchbaseSet)
from couchbase.exceptions import (DocumentExistsException,
ErrorMapper,
InvalidArgumentException,
PathExistsException,
QueueEmpty)
from couchbase.exceptions import exception as CouchbaseBaseException
from couchbase.kv_range_scan import RangeScanRequest
from couchbase.logic import (BlockingWrapper,
decode_replicas,
decode_value)
from couchbase.logic.collection import CollectionLogic
from couchbase.logic.supportability import Supportability
from couchbase.management.queries import CollectionQueryIndexManager
from couchbase.options import (AppendMultiOptions,
DecrementMultiOptions,
ExistsMultiOptions,
GetAllReplicasMultiOptions,
GetAnyReplicaMultiOptions,
GetMultiOptions,
IncrementMultiOptions,
InsertMultiOptions,
LockMultiOptions,
PrependMultiOptions,
RemoveMultiOptions,
ReplaceMultiOptions,
ScanOptions,
TouchMultiOptions,
UnlockMultiOptions,
UpsertMultiOptions,
forward_args,
get_valid_multi_args)
from couchbase.pycbc_core import (binary_multi_operation,
kv_multi_operation,
operations)
from couchbase.result import (CounterResult,
ExistsResult,
GetReplicaResult,
GetResult,
LookupInReplicaResult,
LookupInResult,
MultiCounterResult,
MultiExistsResult,
MultiGetReplicaResult,
MultiGetResult,
MultiMutationResult,
MutateInResult,
MutationResult,
OperationResult,
ScanResultIterable)
from couchbase.subdocument import (array_addunique,
array_append,
array_prepend,
count)
from couchbase.subdocument import get as subdoc_get
from couchbase.subdocument import remove as subdoc_remove
from couchbase.subdocument import replace
from couchbase.subdocument import upsert as subdoc_upsert
from couchbase.transcoder import Transcoder
if TYPE_CHECKING:
from datetime import timedelta
from couchbase._utils import JSONType
from couchbase.kv_range_scan import ScanType
from couchbase.options import (AppendOptions,
DecrementOptions,
ExistsOptions,
GetAndLockOptions,
GetAndTouchOptions,
GetAnyReplicaOptions,
GetOptions,
IncrementOptions,
InsertOptions,
LookupInAllReplicasOptions,
LookupInAnyReplicaOptions,
LookupInOptions,
MutateInOptions,
MutationMultiOptions,
NoValueMultiOptions,
PrependOptions,
RemoveOptions,
ReplaceOptions,
TouchOptions,
UnlockOptions,
UpsertOptions)
from couchbase.result import MultiResultType
from couchbase.subdocument import Spec
class Collection(CollectionLogic):
def __init__(self, scope, name):
super().__init__(scope, name)
def get(self,
key, # type: str
*opts, # type: GetOptions
**kwargs, # type: Dict[str, Any]
) -> GetResult:
"""Retrieves the value of a document from the collection.
Args:
key (str): The key for the document to retrieve.
opts (:class:`~couchbase.options.GetOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.GetOptions`
Returns:
:class:`~couchbase.result.GetResult`: An instance of :class:`~couchbase.result.GetResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
Examples:
Simple get operation::
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
res = collection.get('airline_10')
print(f'Document value: {res.content_as[dict]}')
Simple get operation with options::
from datetime import timedelta
from couchbase.options import GetOptions
# ... other code ...
res = collection.get('airline_10', GetOptions(timeout=timedelta(seconds=2)))
print(f'Document value: {res.content_as[dict]}')
"""
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
transcoder = self.default_transcoder
final_args['transcoder'] = transcoder
return self._get_internal(key, **final_args)
@BlockingWrapper.block_and_decode(GetResult)
def _get_internal(
self,
key, # type: str
**kwargs, # type: Dict[str, Any]
) -> GetResult:
""" **Internal Operation**
Internal use only. Use :meth:`Collection.get` instead.
"""
return super().get(key, **kwargs)
def get_any_replica(self,
key, # type: str
*opts, # type: GetAnyReplicaOptions
**kwargs, # type: Dict[str, Any]
) -> GetReplicaResult:
"""Retrieves the value of a document from the collection leveraging both active and all available replicas returning
the first available.
Args:
key (str): The key for the document to retrieve.
opts (:class:`~couchbase.options.GetAnyReplicaOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.GetAnyReplicaOptions`
Returns:
:class:`~couchbase.result.GetReplicaResult`: An instance of :class:`~couchbase.result.GetReplicaResult`.
Raises:
:class:`~couchbase.exceptions.DocumentUnretrievableException`: If the key provided does not exist
on the server.
Examples:
Simple get_any_replica operation::
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
res = collection.get_any_replica('airline_10')
print(f'Document is replica: {res.is_replica}')
print(f'Document value: {res.content_as[dict]}')
Simple get_any_replica operation with options::
from datetime import timedelta
from couchbase.options import GetAnyReplicaOptions
# ... other code ...
res = collection.get_any_replica('airline_10', GetAnyReplicaOptions(timeout=timedelta(seconds=5)))
print(f'Document is replica: {res.is_replica}')
print(f'Document value: {res.content_as[dict]}')
"""
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
transcoder = self.default_transcoder
final_args['transcoder'] = transcoder
return self._get_any_replica_internal(key, **final_args)
@BlockingWrapper.block_and_decode(GetReplicaResult)
def _get_any_replica_internal(
self,
key, # type: str
**kwargs, # type: Dict[str, Any]
) -> GetReplicaResult:
""" **Internal Operation**
Internal use only. Use :meth:`Collection.get_any_replica` instead.
"""
return super().get_any_replica(key, **kwargs)
def get_all_replicas(self,
key, # type: str
*opts, # type: GetAllReplicasOptions
**kwargs, # type: Dict[str, Any]
) -> Iterable[GetReplicaResult]:
"""Retrieves the value of a document from the collection returning both active and all available replicas.
Args:
key (str): The key for the document to retrieve.
opts (:class:`~couchbase.options.GetAllReplicasOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.GetAllReplicasOptions`
Returns:
Iterable[:class:`~couchbase.result.GetReplicaResult`]: A stream of
:class:`~couchbase.result.GetReplicaResult` representing both active and replicas of the document retrieved.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
Examples:
Simple get_all_replicas operation::
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
result = collection.get_all_replicas('airline_10')
                for res in result:
print(f'Document is replica: {res.is_replica}')
print(f'Document value: {res.content_as[dict]}')
Simple get_all_replicas operation with options::
from datetime import timedelta
from couchbase.options import GetAllReplicasOptions
# ... other code ...
result = collection.get_all_replicas('airline_10', GetAllReplicasOptions(timeout=timedelta(seconds=10)))
for res in result:
print(f'Document is replica: {res.is_replica}')
print(f'Document value: {res.content_as[dict]}')
Stream get_all_replicas results::
from datetime import timedelta
from couchbase.options import GetAllReplicasOptions
# ... other code ...
result = collection.get_all_replicas('airline_10', GetAllReplicasOptions(timeout=timedelta(seconds=10)))
while True:
try:
res = next(result)
print(f'Document is replica: {res.is_replica}')
print(f'Document value: {res.content_as[dict]}')
except StopIteration:
print('Done streaming replicas.')
break
"""
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
transcoder = self.default_transcoder
final_args['transcoder'] = transcoder
return self._get_all_replicas_internal(key, **final_args)
@BlockingWrapper.block_and_decode(GetReplicaResult)
def _get_all_replicas_internal(
self,
key, # type: str
**kwargs, # type: Dict[str, Any]
) -> Iterable[GetReplicaResult]:
""" **Internal Operation**
Internal use only. Use :meth:`Collection.get_all_replicas` instead.
"""
return super().get_all_replicas(key, **kwargs)
@BlockingWrapper.block(ExistsResult)
def exists(
self,
key, # type: str
*opts, # type: ExistsOptions
**kwargs, # type: Dict[str, Any]
) -> ExistsResult:
"""Checks whether a specific document exists or not.
Args:
key (str): The key for the document to check existence.
opts (:class:`~couchbase.options.ExistsOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.ExistsOptions`
Returns:
:class:`~couchbase.result.ExistsResult`: An instance of :class:`~couchbase.result.ExistsResult`.
Examples:
Simple exists operation::
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
key = 'airline_10'
res = collection.exists(key)
print(f'Document w/ key - {key} {"exists" if res.exists else "does not exist"}')
Simple exists operation with options::
from datetime import timedelta
from couchbase.options import ExistsOptions
# ... other code ...
key = 'airline_10'
res = collection.exists(key, ExistsOptions(timeout=timedelta(seconds=2)))
print(f'Document w/ key - {key} {"exists" if res.exists else "does not exist"}')
"""
return super().exists(key, *opts, **kwargs)
@BlockingWrapper.block(MutationResult)
def insert(
self, # type: "Collection"
key, # type: str
value, # type: JSONType
*opts, # type: InsertOptions
**kwargs, # type: Dict[str, Any]
) -> MutationResult:
"""Inserts a new document to the collection, failing if the document already exists.
Args:
key (str): Document key to insert.
value (JSONType): The value of the document to insert.
opts (:class:`~couchbase.options.InsertOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.InsertOptions`
Returns:
:class:`~couchbase.result.MutationResult`: An instance of :class:`~couchbase.result.MutationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentExistsException`: If the document already exists on the
server.
Examples:
Simple insert operation::
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
key = 'airline_8091'
airline = {
"type": "airline",
"id": 8091,
"callsign": "CBS",
"iata": None,
"icao": None,
"name": "Couchbase Airways",
}
                res = collection.insert(key, airline)
Simple insert operation with options::
from couchbase.durability import DurabilityLevel, ServerDurability
from couchbase.options import InsertOptions
# ... other code ...
key = 'airline_8091'
airline = {
"type": "airline",
"id": 8091,
"callsign": "CBS",
"iata": None,
"icao": None,
"name": "Couchbase Airways",
}
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
                res = collection.insert(key, airline, InsertOptions(durability=durability))
"""
return super().insert(key, value, *opts, **kwargs)
@BlockingWrapper.block(MutationResult)
def upsert(
self,
key, # type: str
value, # type: JSONType
*opts, # type: UpsertOptions
**kwargs, # type: Dict[str, Any]
) -> MutationResult:
"""Upserts a document to the collection. This operation succeeds whether or not the document already exists.
Args:
key (str): Document key to upsert.
value (JSONType): The value of the document to upsert.
opts (:class:`~couchbase.options.UpsertOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.UpsertOptions`
Returns:
:class:`~couchbase.result.MutationResult`: An instance of :class:`~couchbase.result.MutationResult`.
Examples:
Simple upsert operation::
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
key = 'airline_8091'
airline = {
"type": "airline",
"id": 8091,
"callsign": "CBS",
"iata": None,
"icao": None,
"name": "Couchbase Airways",
}
                res = collection.upsert(key, airline)
Simple upsert operation with options::
from couchbase.durability import DurabilityLevel, ServerDurability
from couchbase.options import UpsertOptions
# ... other code ...
key = 'airline_8091'
airline = {
"type": "airline",
"id": 8091,
"callsign": "CBS",
"iata": None,
"icao": None,
"name": "Couchbase Airways",
}
durability = ServerDurability(level=DurabilityLevel.MAJORITY)
res = collection.upsert(key, airline, UpsertOptions(durability=durability))
"""
return super().upsert(key, value, *opts, **kwargs)
@BlockingWrapper.block(MutationResult)
def replace(self,
key, # type: str
value, # type: JSONType
*opts, # type: ReplaceOptions
**kwargs, # type: Dict[str, Any]
) -> MutationResult:
"""Replaces the value of an existing document. Failing if the document does not exist.
Args:
key (str): Document key to replace.
value (JSONType): The value of the document to replace.
opts (:class:`~couchbase.options.ReplaceOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.ReplaceOptions`
Returns:
:class:`~couchbase.result.MutationResult`: An instance of :class:`~couchbase.result.MutationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the document does not exist on the
server.
Examples:
Simple replace operation::
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
key = 'airline_8091'
res = collection.get(key)
content = res.content_as[dict]
airline["name"] = "Couchbase Airways!!"
res = collection.replace(key, doc)
Simple replace operation with options::
from couchbase.durability import DurabilityLevel, ServerDurability
from couchbase.options import ReplaceOptions
# ... other code ...
key = 'airline_8091'
res = collection.get(key)
content = res.content_as[dict]
airline["name"] = "Couchbase Airways!!"
durability = ServerDurability(level=DurabilityLevel.MAJORITY)
res = collection.replace(key, content, ReplaceOptions(durability=durability))
"""
return super().replace(key, value, *opts, **kwargs)
@BlockingWrapper.block(MutationResult)
def remove(self,
key, # type: str
*opts, # type: RemoveOptions
**kwargs, # type: Dict[str, Any]
) -> MutationResult:
"""Removes an existing document. Failing if the document does not exist.
Args:
key (str): Key for the document to remove.
opts (:class:`~couchbase.options.RemoveOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.RemoveOptions`
Returns:
:class:`~couchbase.result.MutationResult`: An instance of :class:`~couchbase.result.MutationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the document does not exist on the
server.
Examples:
Simple remove operation::
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
res = collection.remove('airline_10')
Simple remove operation with options::
from couchbase.durability import DurabilityLevel, ServerDurability
from couchbase.options import RemoveOptions
# ... other code ...
durability = ServerDurability(level=DurabilityLevel.MAJORITY)
res = collection.remove('airline_10', RemoveOptions(durability=durability))
"""
return super().remove(key, *opts, **kwargs)
@BlockingWrapper.block(MutationResult)
def touch(self,
key, # type: str
expiry, # type: timedelta
*opts, # type: TouchOptions
**kwargs, # type: Dict[str, Any]
) -> MutationResult:
"""Updates the expiry on an existing document.
Args:
key (str): Key for the document to touch.
expiry (timedelta): The new expiry for the document.
opts (:class:`~couchbase.options.TouchOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.TouchOptions`
Returns:
:class:`~couchbase.result.MutationResult`: An instance of :class:`~couchbase.result.MutationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the document does not exist on the
server.
Examples:
Simple touch operation::
from datetime import timedelta
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
res = collection.touch('airline_10', timedelta(seconds=300))
Simple touch operation with options::
from datetime import timedelta
from couchbase.options import TouchOptions
# ... other code ...
res = collection.touch('airline_10',
timedelta(seconds=300),
TouchOptions(timeout=timedelta(seconds=2)))
"""
return super().touch(key, expiry, *opts, **kwargs)
def get_and_touch(self,
key, # type: str
expiry, # type: timedelta
*opts, # type: GetAndTouchOptions
**kwargs, # type: Dict[str, Any]
) -> GetResult:
"""Retrieves the value of the document and simultanously updates the expiry time for the same document.
Args:
key (str): The key for the document retrieve and set expiry time.
expiry (timedelta): The new expiry to apply to the document.
opts (:class:`~couchbase.options.GetAndTouchOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.GetAndTouchOptions`
Returns:
:class:`~couchbase.result.GetResult`: An instance of :class:`~couchbase.result.GetResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
Examples:
Simple get and touch operation::
from datetime import timedelta
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
key = 'airline_10'
res = collection.get_and_touch(key, timedelta(seconds=20))
print(f'Document w/ updated expiry: {res.content_as[dict]}')
Simple get and touch operation with options::
from datetime import timedelta
from couchbase.options import GetAndTouchOptions
# ... other code ...
key = 'airline_10'
res = collection.get_and_touch(key,
timedelta(seconds=20),
GetAndTouchOptions(timeout=timedelta(seconds=2)))
print(f'Document w/ updated expiry: {res.content_as[dict]}')
"""
# add to kwargs for conversion to int
kwargs["expiry"] = expiry
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
transcoder = self.default_transcoder
final_args['transcoder'] = transcoder
return self._get_and_touch_internal(key, **final_args)
@BlockingWrapper.block_and_decode(GetResult)
def _get_and_touch_internal(self,
key, # type: str
**kwargs, # type: Dict[str, Any]
) -> GetResult:
""" **Internal Operation**
Internal use only. Use :meth:`Collection.get_and_touch` instead.
"""
return super().get_and_touch(key, **kwargs)
def get_and_lock(
self,
key, # type: str
lock_time, # type: timedelta
*opts, # type: GetAndLockOptions
**kwargs, # type: Dict[str, Any]
) -> GetResult:
"""Locks a document and retrieves the value of that document at the time it is locked.
Args:
key (str): The key for the document to lock and retrieve.
lock_time (timedelta): The amount of time to lock the document.
opts (:class:`~couchbase.options.GetAndLockOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.GetAndLockOptions`
Returns:
:class:`~couchbase.result.GetResult`: An instance of :class:`~couchbase.result.GetResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
Examples:
Simple get and lock operation::
from datetime import timedelta
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
key = 'airline_10'
res = collection.get_and_lock(key, timedelta(seconds=20))
print(f'Locked document: {res.content_as[dict]}')
Simple get and lock operation with options::
from datetime import timedelta
from couchbase.options import GetAndLockOptions
# ... other code ...
key = 'airline_10'
res = collection.get_and_lock(key,
timedelta(seconds=20),
GetAndLockOptions(timeout=timedelta(seconds=2)))
print(f'Locked document: {res.content_as[dict]}')
"""
# add to kwargs for conversion to int
kwargs["lock_time"] = lock_time
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
transcoder = self.default_transcoder
final_args['transcoder'] = transcoder
return self._get_and_lock_internal(key, **final_args)
@BlockingWrapper.block_and_decode(GetResult)
def _get_and_lock_internal(self,
key, # type: str
**kwargs, # type: Dict[str, Any]
) -> GetResult:
""" **Internal Operation**
Internal use only. Use :meth:`Collection.get_and_lock` instead.
"""
return super().get_and_lock(key, **kwargs)
@BlockingWrapper.block(None)
def unlock(self,
key, # type: str
cas, # type: int
*opts, # type: UnlockOptions
**kwargs, # type: Dict[str, Any]
) -> None:
"""Unlocks a previously locked document.
Args:
key (str): The key for the document to unlock.
cas (int): The CAS of the document, used to validate lock ownership.
opts (:class:`~couchbase.options.UnlockOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.UnlockOptions`
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
:class:`~couchbase.exceptions.DocumentLockedException`: If the provided cas is invalid.
Examples:
Simple unlock operation::
from datetime import timedelta
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
key = 'airline_10'
res = collection.get_and_lock(key, timedelta(seconds=5))
collection.unlock(key, res.cas)
# this should be okay once document is unlocked
collection.upsert(key, res.content_as[dict])
"""
return super().unlock(key, cas, *opts, **kwargs)
def lookup_in(
self,
key, # type: str
spec, # type: Iterable[Spec]
*opts, # type: LookupInOptions
**kwargs, # type: Dict[str, Any]
) -> LookupInResult:
"""Performs a lookup-in operation against a document, fetching individual fields or information
about specific fields inside the document value.
Args:
key (str): The key for the document to look in.
spec (Iterable[:class:`~couchbase.subdocument.Spec`]): A list of specs describing the data to fetch
from the document.
opts (:class:`~couchbase.options.LookupInOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.LookupInOptions`
Returns:
:class:`~couchbase.result.LookupInResult`: An instance of :class:`~couchbase.result.LookupInResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
Examples:
Simple look-up in operation::
import couchbase.subdocument as SD
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('hotel')
key = 'hotel_10025'
res = collection.lookup_in(key, (SD.get("geo"),))
print(f'Hotel {key} coordinates: {res.content_as[dict](0)}')
Simple look-up in operation with options::
from datetime import timedelta
import couchbase.subdocument as SD
from couchbase.options import LookupInOptions
# ... other code ...
key = 'hotel_10025'
res = collection.lookup_in(key,
(SD.get("geo"),),
LookupInOptions(timeout=timedelta(seconds=2)))
print(f'Hotel {key} coordinates: {res.content_as[dict](0)}')
"""
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
transcoder = self.default_transcoder
final_args['transcoder'] = transcoder
return self._lookup_in_internal(key, spec, **final_args)
@BlockingWrapper.block_and_decode(LookupInResult)
def _lookup_in_internal(
self,
key, # type: str
spec, # type: Iterable[Spec]
**kwargs, # type: Dict[str, Any]
) -> LookupInResult:
""" **Internal Operation**
Internal use only. Use :meth:`Collection.lookup_in` instead.
"""
return super().lookup_in(key, spec, **kwargs)
def lookup_in_any_replica(
self,
key, # type: str
spec, # type: Iterable[Spec]
*opts, # type: LookupInAnyReplicaOptions
**kwargs, # type: Dict[str, Any]
) -> LookupInReplicaResult:
"""Performs a lookup-in operation against a document, fetching individual fields or information
about specific fields inside the document value. It leverages both active and all available replicas,
returning the first available result.
Args:
key (str): The key for the document to look in.
spec (Iterable[:class:`~couchbase.subdocument.Spec`]): A list of specs describing the data to fetch
from the document.
opts (:class:`~couchbase.options.LookupInAnyReplicaOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.LookupInAnyReplicaOptions`
Returns:
:class:`~couchbase.result.LookupInReplicaResult`: An instance of :class:`~couchbase.result.LookupInReplicaResult`.
Raises:
:class:`~couchbase.exceptions.DocumentUnretrievableException`: If the key provided does not exist
on the server.
Examples:
Simple lookup_in_any_replica operation::
import couchbase.subdocument as SD
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('hotel')
key = 'hotel_10025'
res = collection.lookup_in_any_replica(key, (SD.get("geo"),))
print(f'Hotel {key} coordinates: {res.content_as[dict](0)}')
Simple lookup_in_any_replica operation with options::
from datetime import timedelta
import couchbase.subdocument as SD
from couchbase.options import LookupInAnyReplicaOptions
# ... other code ...
key = 'hotel_10025'
res = collection.lookup_in_any_replica(key,
(SD.get("geo"),),
LookupInAnyReplicaOptions(timeout=timedelta(seconds=2)))
print(f'Document is replica: {res.is_replica}')
print(f'Hotel {key} coordinates: {res.content_as[dict](0)}')
""" # noqa: E501
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
transcoder = self.default_transcoder
final_args['transcoder'] = transcoder
return self._lookup_in_any_replica_internal(key, spec, **final_args)
@BlockingWrapper.block_and_decode(LookupInReplicaResult)
def _lookup_in_any_replica_internal(
self,
key, # type: str
spec, # type: Iterable[Spec]
**kwargs, # type: Dict[str, Any]
) -> LookupInReplicaResult:
""" **Internal Operation**
Internal use only. Use :meth:`Collection.lookup_in_any_replica` instead.
"""
return super().lookup_in_any_replica(key, spec, **kwargs)
def lookup_in_all_replicas(
self,
key, # type: str
spec, # type: Iterable[Spec]
*opts, # type: LookupInAllReplicasOptions
**kwargs, # type: Any
) -> Iterable[LookupInReplicaResult]:
"""Performs a lookup-in operation against a document, fetching individual fields or information
about specific fields inside the document value, returning results from both active and all available replicas.
Args:
key (str): The key for the document to look in.
spec (Iterable[:class:`~couchbase.subdocument.Spec`]): A list of specs describing the data to fetch
from the document.
opts (:class:`~couchbase.options.LookupInAllReplicasOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.LookupInAllReplicasOptions`
Returns:
Iterable[:class:`~couchbase.result.LookupInReplicaResult`]: A stream of
:class:`~couchbase.result.LookupInReplicaResult` representing both active and replicas of the sub-document
retrieved.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
Examples:
Simple lookup_in_all_replicas operation::
import couchbase.subdocument as SD
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('hotel')
key = 'hotel_10025'
results = collection.lookup_in_all_replicas(key, (SD.get("geo"),))
for res in results:
print(f'Document is replica: {res.is_replica}')
print(f'Hotel {key} coordinates: {res.content_as[dict](0)}')
Simple lookup_in_all_replicas operation with options::
import couchbase.subdocument as SD
from datetime import timedelta
from couchbase.options import LookupInAllReplicasOptions
# ... other code ...
key = 'hotel_10025'
results = collection.lookup_in_all_replicas(key,
(SD.get("geo"),),
LookupInAllReplicasOptions(timeout=timedelta(seconds=2)))
for res in results:
print(f'Document is replica: {res.is_replica}')
print(f'Hotel {key} coordinates: {res.content_as[dict](0)}')
Stream lookup_in_all_replicas results::
from datetime import timedelta
from couchbase.options import LookupInAllReplicasOptions
# ... other code ...
key = 'hotel_10025'
results = collection.lookup_in_all_replicas(key,
(SD.get("geo"),),
LookupInAllReplicasOptions(timeout=timedelta(seconds=2)))
while True:
try:
res = next(results)
print(f'Document is replica: {res.is_replica}')
print(f'Hotel {key} coordinates: {res.content_as[dict](0)}')
except StopIteration:
print('Done streaming replicas.')
break
"""
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
transcoder = self.default_transcoder
final_args['transcoder'] = transcoder
return self._lookup_in_all_replicas_internal(key, spec, **final_args)
@BlockingWrapper.block_and_decode(LookupInReplicaResult)
def _lookup_in_all_replicas_internal(
self,
key, # type: str
spec, # type: Iterable[Spec]
**kwargs, # type: Dict[str, Any]
) -> Iterable[LookupInReplicaResult]:
""" **Internal Operation**
Internal use only. Use :meth:`Collection.lookup_in_all_replicas` instead.
"""
return super().lookup_in_all_replicas(key, spec, **kwargs)
@BlockingWrapper.block(MutateInResult)
def mutate_in(
self,
key, # type: str
spec, # type: Iterable[Spec]
*opts, # type: MutateInOptions
**kwargs, # type: Dict[str, Any]
) -> MutateInResult:
"""Performs a mutate-in operation against a document. Allowing atomic modification of specific fields
within a document. Also enables access to document extended-attributes (i.e. xattrs).
Args:
key (str): The key for the document to look in.
spec (Iterable[:class:`~couchbase.subdocument.Spec`]): A list of specs describing the operations to
perform on the document.
opts (:class:`~couchbase.options.MutateInOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.MutateInOptions`
Returns:
:class:`~couchbase.result.MutateInResult`: An instance of :class:`~couchbase.result.MutateInResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
Examples:
Simple mutate-in operation::
import couchbase.subdocument as SD
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('hotel')
key = 'hotel_10025'
res = collection.mutate_in(key, (SD.replace("city", "New City"),))
Simple mutate-in operation with options::
from datetime import timedelta
import couchbase.subdocument as SD
from couchbase.options import MutateInOptions
# ... other code ...
key = 'hotel_10025'
res = collection.mutate_in(key,
(SD.replace("city", "New City"),),
MutateInOptions(timeout=timedelta(seconds=2)))
"""
return super().mutate_in(key, spec, *opts, **kwargs)
def scan(self, scan_type, # type: ScanType
*opts, # type: ScanOptions
**kwargs, # type: Dict[str, Any]
) -> ScanResultIterable:
"""Execute a key-value range scan operation from the collection.
**VOLATILE** This API is subject to change at any time.
Args:
scan_type (:class:`~couchbase.kv_range_scan.ScanType`): Either a :class:`~couchbase.kv_range_scan.RangeScan`,
:class:`~couchbase.kv_range_scan.PrefixScan` or
:class:`~couchbase.kv_range_scan.SamplingScan` instance.
opts (:class:`~couchbase.options.ScanOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.ScanOptions`
Raises:
:class:`~couchbase.exceptions.InvalidArgumentException`: If scan_type is not a RangeScan, PrefixScan or SamplingScan instance.
:class:`~couchbase.exceptions.InvalidArgumentException`: If sort option is provided and is incorrect type.
:class:`~couchbase.exceptions.InvalidArgumentException`: If consistent_with option is provided and is not a valid :class:`~couchbase.mutation_state.MutationState`.
Returns:
:class:`~couchbase.result.ScanResultIterable`: An instance of :class:`~couchbase.result.ScanResultIterable`.
Examples:
Simple range scan operation::
from couchbase.kv_range_scan import RangeScan, ScanTerm
from couchbase.options import ScanOptions
# ... other code ...
bucket = cluster.bucket('travel-sample')
collection = bucket.scope('inventory').collection('airline')
scan_type = RangeScan(ScanTerm('airline-00'), ScanTerm('airline-99'))
scan_iter = collection.scan(scan_type, ScanOptions(ids_only=True))
for res in scan_iter:
print(res)
""" # noqa: E501
final_args = forward_args(kwargs, *opts)
transcoder = final_args.get('transcoder', None)
if not transcoder:
final_args['transcoder'] = self.default_transcoder
scan_args = super().build_scan_args(scan_type, **final_args)
range_scan_request = RangeScanRequest(**scan_args)
return ScanResultIterable(range_scan_request)
def binary(self) -> BinaryCollection:
"""Creates a BinaryCollection instance, allowing access to various binary operations
possible against a collection.
.. seealso::
:class:`~couchbase.binary_collection.BinaryCollection`
Returns:
:class:`~couchbase.binary_collection.BinaryCollection`: A BinaryCollection instance.
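Examples:
Illustrative sketch only, showing how a BinaryCollection might be obtained and used (the counter key is a placeholder; see :class:`~couchbase.binary_collection.BinaryCollection` for the available operations)::
# assumes a collection reference as in the examples above
b_collection = collection.binary()
res = b_collection.increment('my_counter_doc')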
"""
return BinaryCollection(self)
@BlockingWrapper.block(MutationResult)
def _append(
self,
key, # type: str
value, # type: Union[str,bytes,bytearray]
*opts, # type: AppendOptions
**kwargs, # type: Dict[str, Any]
) -> MutationResult:
""" **Internal Operation**
Internal use only. Use :meth:`.BinaryCollection.append` instead.
"""
return super().append(key, value, *opts, **kwargs)
@BlockingWrapper.block(MutationResult)
def _prepend(
self,
key, # type: str
value, # type: Union[str,bytes,bytearray]
*opts, # type: PrependOptions
**kwargs, # type: Dict[str, Any]
) -> MutationResult:
""" **Internal Operation**
Internal use only. Use :meth:`.BinaryCollection.prepend` instead.
"""
return super().prepend(key, value, *opts, **kwargs)
@BlockingWrapper.block(CounterResult)
def _increment(
self,
key, # type: str
*opts, # type: IncrementOptions
**kwargs, # type: Dict[str, Any]
) -> CounterResult:
""" **Internal Operation**
Internal use only. Use :meth:`.BinaryCollection.increment` instead.
"""
return super().increment(key, *opts, **kwargs)
@BlockingWrapper.block(CounterResult)
def _decrement(
self,
key, # type: str
*opts, # type: DecrementOptions
**kwargs, # type: Dict[str, Any]
) -> CounterResult:
""" **Internal Operation**
Internal use only. Use :meth:`.BinaryCollection.decrement` instead.
"""
return super().decrement(key, *opts, **kwargs)
def couchbase_list(self, key # type: str
) -> CouchbaseList:
"""Returns a CouchbaseList permitting simple list storage in a document.
.. seealso::
:class:`~couchbase.datastructures.CouchbaseList`
Returns:
:class:`~couchbase.datastructures.CouchbaseList`: A CouchbaseList instance.
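Examples:
Illustrative sketch of simple list storage (the document key and values shown are placeholders; assumes a ``bucket`` reference as in the examples above)::
collection = bucket.default_collection()
cb_list = collection.couchbase_list('list_doc')
cb_list.append('item-1')
cb_list.append('item-2')
print(f'List size: {cb_list.size()}')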
"""
return CouchbaseList(key, self)
@BlockingWrapper._dsop(create_type='list')
def list_append(self, key, # type: str
value, # type: JSONType
create=False, # type: Optional[bool]
**kwargs, # type: Dict[str, Any]
) -> OperationResult:
"""Add an item to the end of a list.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseList.append`
instead.
Args:
key (str): The key for the list document.
value (JSONType): The value to append to the list.
create (bool, optional): Whether the list should be created if it does not exist.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
op = array_append('', value)
sd_res = self.mutate_in(key, (op,), **kwargs)
return OperationResult(sd_res.cas, sd_res.mutation_token())
@BlockingWrapper._dsop(create_type='list')
def list_prepend(self, key, # type: str
value, # type: JSONType
create=False, # type: Optional[bool]
**kwargs, # type: Dict[str, Any]
) -> OperationResult:
""" Add an item to the beginning of a list.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseList.prepend`
instead.
Args:
key (str): The key for the list document.
value (JSONType): The value to prepend to the list.
create (bool, optional): Whether the list should be created if it does not exist.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
op = array_prepend('', value)
sd_res = self.mutate_in(key, (op,), **kwargs)
return OperationResult(sd_res.cas, sd_res.mutation_token())
@BlockingWrapper._dsop()
def list_set(self, key, # type: str
index, # type: int
value, # type: JSONType
**kwargs # type: Dict[str, Any]
) -> OperationResult:
"""Sets an item within a list at a given position.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseList.set_at`
instead.
Args:
key (str): The key for the list document.
index (int): The position to replace.
value (JSONType): The value to prepend to the list.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
IndexError: If the index is out of bounds.
"""
op = replace(f'[{index}]', value)
sd_res = self.mutate_in(key, (op,), **kwargs)
return OperationResult(sd_res.cas, sd_res.mutation_token())
@BlockingWrapper._dsop()
def list_get(self, key, # type: str
index, # type: int
**kwargs # type: Dict[str, Any]
) -> Any:
"""Get a specific element within a list.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseList.get_at`
instead.
Args:
key (str): The key for the list document.
index (int): The position to retrieve.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
Any: The value of the element at the specified index.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
IndexError: If the index is out of bounds.
"""
op = subdoc_get(f'[{index}]')
sd_res = self.lookup_in(key, (op,), **kwargs)
return sd_res.value[0].get("value", None)
@BlockingWrapper._dsop()
def list_remove(self, key, # type: str
index, # type: int
**kwargs # type: Dict[str, Any]
) -> OperationResult:
"""Remove the element at a specific index from a list.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseList.remove_at`
instead.
Args:
key (str): The key for the list document.
index (int): The position to remove.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
IndexError: If the index is out of bounds.
"""
op = subdoc_remove(f'[{index}]')
sd_res = self.mutate_in(key, (op,), **kwargs)
return OperationResult(sd_res.cas, sd_res.mutation_token())
@BlockingWrapper._dsop()
def list_size(self, key, # type: str
**kwargs # type: Dict[str, Any]
) -> int:
"""Returns the number of items in the list.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseList.size`
instead.
Args:
key (str): The key for the list document.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
int: The number of items in the list.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
op = count('')
sd_res = self.lookup_in(key, (op,), **kwargs)
return sd_res.value[0].get("value", None)
def couchbase_map(self, key # type: str
) -> CouchbaseMap:
"""Returns a CouchbaseMap permitting simple map storage in a document.
.. seealso::
:class:`~couchbase.datastructures.CouchbaseMap`
Returns:
:class:`~couchbase.datastructures.CouchbaseMap`: A CouchbaseMap instance.
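Examples:
Illustrative sketch of simple map storage (the document key, map key and value shown are placeholders; assumes a ``bucket`` reference as in the examples above)::
collection = bucket.default_collection()
cb_map = collection.couchbase_map('map_doc')
cb_map.add('key1', 'value1')
print(f'Value for key1: {cb_map.get("key1")}')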
"""
return CouchbaseMap(key, self)
@BlockingWrapper._dsop(create_type='dict')
def map_add(self,
key, # type: str
mapkey, # type: str
value, # type: Any
create=False, # type: Optional[bool]
**kwargs # type: Dict[str, Any]
) -> OperationResult:
"""Set a value for a key in a map.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseMap.add`
instead.
Args:
key (str): The key for the map document.
mapkey (str): The key in the map to set.
value (Any): The value to use.
create (bool, optional): Whether the map should be created if it does not exist.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
op = subdoc_upsert(mapkey, value)
sd_res = self.mutate_in(key, (op,), **kwargs)
return OperationResult(sd_res.cas, sd_res.mutation_token())
@BlockingWrapper._dsop()
def map_get(self,
key, # type: str
mapkey, # type: str
**kwargs # type: Dict[str, Any]
) -> Any:
"""Retrieve a value from a map.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseMap.get`
instead.
Args:
key (str): The key for the map document.
mapkey (str): The key in the map to retrieve.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
Any: The value of the specified key.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
op = subdoc_get(mapkey)
sd_res = self.lookup_in(key, (op,), **kwargs)
return sd_res.value[0].get("value", None)
@BlockingWrapper._dsop()
def map_remove(self,
key, # type: str
mapkey, # type: str
**kwargs # type: Dict[str, Any]
) -> OperationResult:
"""Remove an item from a map.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseMap.remove`
instead.
Args:
key (str): The key for the map document.
mapkey (str): The key in the map to remove.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
op = subdoc_remove(mapkey)
sd_res = self.mutate_in(key, (op,), **kwargs)
return OperationResult(sd_res.cas, sd_res.mutation_token())
@BlockingWrapper._dsop()
def map_size(self,
key, # type: str
**kwargs # type: Dict[str, Any]
) -> int:
"""Get the number of items in the map.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseMap.size`
instead.
Args:
key (str): The key for the map document.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
int: The number of items in the map.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
op = count('')
sd_res = self.lookup_in(key, (op,), **kwargs)
return sd_res.value[0].get("value", None)
def couchbase_set(self, key # type: str
) -> CouchbaseSet:
"""Returns a CouchbaseSet permitting simple map storage in a document.
.. seealso::
:class:`~couchbase.datastructures.CouchbaseSet`
Returns:
:class:`~couchbase.datastructures.CouchbaseSet`: A CouchbaseSet instance.
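Examples:
Illustrative sketch of simple set storage (the document key and value shown are placeholders; assumes a ``bucket`` reference as in the examples above)::
collection = bucket.default_collection()
cb_set = collection.couchbase_set('set_doc')
cb_set.add('item-1')
print(f'Set contains item-1: {cb_set.contains("item-1")}')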
"""
return CouchbaseSet(key, self)
@BlockingWrapper._dsop(create_type='list')
def set_add(self,
key, # type: str
value, # type: Any
create=False, # type: Optional[bool]
**kwargs # type: Dict[str, Any]
) -> Optional[OperationResult]:
"""Add an item to a set if the item does not yet exist.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseSet.add`
instead.
Args:
key (str): The key for the set document.
value (Any): The value to add to the set.
create (bool, optional): Whether the set should be created if it does not exist.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
op = array_addunique('', value)
try:
sd_res = self.mutate_in(key, (op,), **kwargs)
return OperationResult(sd_res.cas, sd_res.mutation_token())
except PathExistsException:
pass
@BlockingWrapper._dsop()
def set_remove(self,
key, # type: str
value, # type: Any
**kwargs # type: Dict[str, Any]
) -> Optional[OperationResult]:
"""Remove an item from a set.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseSet.remove`
instead.
Args:
key (str): The key for the set document.
value (Any): The value to remove from the set.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
while True:
rv = self.get(key, **kwargs)
try:
ix = rv.value.index(value)
kwargs['cas'] = rv.cas
return self.list_remove(key, ix, **kwargs)
except DocumentExistsException:
pass
except ValueError:
return
def set_size(self,
key, # type: str
**kwargs # type: Dict[str, Any]
) -> int:
"""Get the length of a set.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseSet.size`
instead.
Args:
key (str): The key for the set document.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
int: The length of a set.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
return self.list_size(key, **kwargs)
def set_contains(self,
key, # type: str
value, # type: Any
**kwargs # type: Dict[str, Any]
) -> bool:
"""Determine if an item exists in a set
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseSet.contains`
instead.
Args:
key (str): The key for the set document.
value (Any): The value to check for.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
bool: True if the set contains the specified value. False otherwise.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
rv = self.get(key, **kwargs)
return value in rv.value
def couchbase_queue(self, key # type: str
) -> CouchbaseQueue:
"""Returns a CouchbaseQueue permitting simple map storage in a document.
.. seealso::
:class:`~couchbase.datastructures.CouchbaseQueue`
Returns:
:class:`~couchbase.datastructures.CouchbaseQueue`: A CouchbaseQueue instance.
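Examples:
Illustrative sketch of simple queue storage (the document key and value shown are placeholders; assumes a ``bucket`` reference as in the examples above)::
collection = bucket.default_collection()
cb_queue = collection.couchbase_queue('queue_doc')
cb_queue.push('item-1')
item = cb_queue.pop()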
"""
return CouchbaseQueue(key, self)
@BlockingWrapper._dsop(create_type='list')
def queue_push(self,
key, # type: str
value, # type: Any
create=False, # type: Optional[bool]
**kwargs # type: Dict[str, Any]
) -> OperationResult:
"""Add an item to the end of a queue.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseQueue.push`
instead.
Args:
key (str): The key for the queue document.
value (Any): The value to add.
create (bool, optional): Whether the queue should be created if it does not exist.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
return self.list_prepend(key, value, **kwargs)
@BlockingWrapper._dsop()
def queue_pop(self,
key, # type: str
**kwargs # type: Dict[str, Any]
) -> OperationResult:
"""Remove and return the first item queue.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseQueue.pop`
instead.
Args:
key (str): The key for the queue document.
**kwargs (Dict[str, Any]): keyword arguments that can be used as optional parameters
for this operation.
Returns:
:class:`~couchbase.result.OperationResult`: An instance of :class:`~couchbase.result.OperationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
while True:
try:
itm = self.list_get(key, -1, **kwargs)
except IndexError:
raise QueueEmpty
kwargs.update({k: v for k, v in getattr(
itm, '__dict__', {}).items() if k in {'cas'}})
try:
self.list_remove(key, -1, **kwargs)
return itm
except DocumentExistsException:
pass
except IndexError:
raise QueueEmpty
@BlockingWrapper._dsop()
def queue_size(self,
key # type: str
) -> int:
"""Get the length of a queue.
.. warning::
This method is deprecated and will be removed in a future version. Use :meth:`.CouchbaseQueue.size`
instead.
Args:
key (str): The key for the queue document.
Returns:
int: The length of the queue.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist
on the server.
"""
return self.list_size(key)
def _get_multi_mutation_transcoded_op_args(
self,
keys_and_docs, # type: Dict[str, JSONType]
*opts, # type: MutationMultiOptions
**kwargs, # type: Any
) -> Tuple[Dict[str, Any], bool]:
if not isinstance(keys_and_docs, dict):
raise InvalidArgumentException(message='Expected keys_and_docs to be a dict.')
opts_type = kwargs.pop('opts_type', None)
if not opts_type:
raise InvalidArgumentException(message='Expected options type is missing.')
final_args = get_valid_multi_args(opts_type, kwargs, *opts)
per_key_args = final_args.pop('per_key_options', None)
op_transcoder = final_args.pop('transcoder', self.default_transcoder)
op_args = {}
for key, value in keys_and_docs.items():
op_args[key] = copy(final_args)
# per key args override global args
if per_key_args and key in per_key_args:
key_transcoder = per_key_args[key].pop('transcoder', op_transcoder)
op_args[key].update(per_key_args[key])
transcoded_value = key_transcoder.encode_value(value)
else:
transcoded_value = op_transcoder.encode_value(value)
op_args[key]['value'] = transcoded_value
if isinstance(opts_type, ReplaceMultiOptions):
for k, v in op_args.items():
expiry = v.get('expiry', None)
preserve_expiry = v.get('preserve_expiry', False)
if expiry and preserve_expiry is True:
raise InvalidArgumentException(
message=("The expiry and preserve_expiry options cannot "
f"both be set for replace operations. Multi-op key: {k}.")
)
return_exceptions = final_args.pop('return_exceptions', True)
return op_args, return_exceptions
def _get_multi_op_args(
self,
keys, # type: List[str]
*opts, # type: NoValueMultiOptions
**kwargs, # type: Any
) -> Tuple[Dict[str, Any], bool, Dict[str, Transcoder]]:
if not isinstance(keys, list):
raise InvalidArgumentException(message='Expected keys to be a list.')
opts_type = kwargs.pop('opts_type', None)
if not opts_type:
raise InvalidArgumentException(message='Expected options type is missing.')
final_args = get_valid_multi_args(opts_type, kwargs, *opts)
op_transcoder = final_args.pop('transcoder', self.default_transcoder)
per_key_args = final_args.pop('per_key_options', None)
op_args = {}
key_transcoders = {}
for key in keys:
op_args[key] = copy(final_args)
# per key args override global args
if per_key_args and key in per_key_args:
key_transcoder = per_key_args[key].pop('transcoder', op_transcoder)
key_transcoders[key] = key_transcoder
op_args[key].update(per_key_args[key])
else:
key_transcoders[key] = op_transcoder
return_exceptions = final_args.pop('return_exceptions', True)
return op_args, return_exceptions, key_transcoders
def get_multi(
self,
keys, # type: List[str]
*opts, # type: GetMultiOptions
**kwargs, # type: Any
) -> MultiGetResult:
"""For each key in the provided list, retrieve the document associated with the key.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys (List[str]): The keys to use for the multiple get operations.
opts (:class:`~couchbase.options.GetMultiOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.GetMultiOptions`
Returns:
:class:`~couchbase.result.MultiGetResult`: An instance of
:class:`~couchbase.result.MultiGetResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist on the
server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
Examples:
Simple get-multi operation::
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
res = collection.get_multi(keys)
for k, v in res.results.items():
print(f'Doc {k} has value: {v.content_as[dict]}')
Simple get-multi operation, raise an Exception if an Exception occurs::
from couchbase.options import GetMultiOptions
# ... other code ...
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
res = collection.get_multi(keys,
GetMultiOptions(return_exceptions=False))
for k, v in res.results.items():
print(f'Doc {k} has value: {v.content_as[dict]}')
Simple get-multi operation, individual key options::
from datetime import timedelta
from couchbase.options import GetMultiOptions, GetOptions
# ... other code ...
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
per_key_opts = {'doc1': GetOptions(timeout=timedelta(seconds=10))}
res = collection.get_multi(keys,
GetMultiOptions(per_key_options=per_key_opts))
for k, v in res.results.items():
print(f'Doc {k} has value: {v.content_as[dict]}')
"""
op_args, return_exceptions, transcoders = self._get_multi_op_args(keys,
*opts,
opts_type=GetMultiOptions,
**kwargs)
op_type = operations.GET.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
for k, v in res.raw_result.items():
if k == 'all_okay':
continue
if isinstance(v, CouchbaseBaseException):
continue
value = v.raw_result.get('value', None)
flags = v.raw_result.get('flags', None)
tc = transcoders[k]
v.raw_result['value'] = decode_value(tc, value, flags)
return MultiGetResult(res, return_exceptions)
def get_any_replica_multi(
self,
keys, # type: List[str]
*opts, # type: GetAnyReplicaMultiOptions
**kwargs, # type: Any
) -> MultiGetReplicaResult:
"""For each key in the provided list, retrieve the document associated with the key from the collection
leveraging both active and all available replicas, returning the first available result.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys (List[str]): The keys to use for the multiple get operations.
opts (:class:`~couchbase.options.GetAnyReplicaMultiOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.GetAnyReplicaMultiOptions`
Returns:
:class:`~couchbase.result.MultiGetReplicaResult`: An instance of
:class:`~couchbase.result.MultiGetReplicaResult`.
Raises:
:class:`~couchbase.exceptions.DocumentUnretrievableException`: If the key provided does not exist on the
server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
Examples:
Simple get_any_replica_multi operation::
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
res = collection.get_any_replica_multi(keys)
for k, v in res.results.items():
if v.is_replica:
print(f'Replica doc {k} has value: {v.content_as[dict]}')
else:
print(f'Active doc {k} has value: {v.content_as[dict]}')
Simple get_any_replica_multi operation, raise an Exception if an Exception occurs::
from couchbase.options import GetAnyReplicaMultiOptions
# ... other code ...
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
res = collection.get_any_replica_multi(keys,
GetAnyReplicaMultiOptions(return_exceptions=False))
for k, v in res.results.items():
if v.is_replica:
print(f'Replica doc {k} has value: {v.content_as[dict]}')
else:
print(f'Active doc {k} has value: {v.content_as[dict]}')
Simple get_any_replica_multi operation, individual key options::
from datetime import timedelta
from couchbase.options import GetAnyReplicaMultiOptions, GetAnyReplicaOptions
# ... other code ...
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
per_key_opts = {'doc1': GetAnyReplicaOptions(timeout=timedelta(seconds=10))}
res = collection.get_any_replica_multi(keys,
GetAnyReplicaMultiOptions(per_key_options=per_key_opts))
for k, v in res.results.items():
if v.is_replica:
print(f'Replica doc {k} has value: {v.content_as[dict]}')
else:
print(f'Active doc {k} has value: {v.content_as[dict]}')
"""
op_args, return_exceptions, transcoders = self._get_multi_op_args(keys,
*opts,
opts_type=GetAnyReplicaMultiOptions,
**kwargs)
op_type = operations.GET_ANY_REPLICA.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
for k, v in res.raw_result.items():
if k == 'all_okay':
continue
if isinstance(v, CouchbaseBaseException):
continue
value = v.raw_result.get('value', None)
flags = v.raw_result.get('flags', None)
tc = transcoders[k]
v.raw_result['value'] = decode_value(tc, value, flags)
return MultiGetReplicaResult(res, return_exceptions)
def get_all_replicas_multi(
self,
keys, # type: List[str]
*opts, # type: GetAllReplicasMultiOptions
**kwargs, # type: Any
) -> MultiGetReplicaResult:
"""For each key in the provided list, retrieve the document from the collection returning both
active and all available replicas.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys (List[str]): The keys to use for the multiple get operations.
opts (:class:`~couchbase.options.GetAllReplicasMultiOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.GetAllReplicasMultiOptions`
Returns:
:class:`~couchbase.result.MultiGetReplicaResult`: An instance of
:class:`~couchbase.result.MultiGetReplicaResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist on the
server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
Examples:
Simple get_all_replicas_multi operation::
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
res = collection.get_all_replicas_multi(keys)
for k, docs in res.results.items():
for doc in docs:
if doc.is_replica:
print(f'Replica doc {k} has value: {doc.content_as[dict]}')
else:
print(f'Active doc {k} has value: {doc.content_as[dict]}')
Simple get_all_replicas_multi operation, raise an Exception if an Exception occurs::
from couchbase.options import GetAllReplicasMultiOptions
# ... other code ...
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
res = collection.get_all_replicas_multi(keys,
GetAllReplicasMultiOptions(return_exceptions=False))
for k, docs in res.results.items():
for doc in docs:
if doc.is_replica:
print(f'Replica doc {k} has value: {doc.content_as[dict]}')
else:
print(f'Active doc {k} has value: {doc.content_as[dict]}')
Simple get_all_replicas_multi operation, individual key options::
from datetime import timedelta
from couchbase.options import GetAllReplicasMultiOptions, GetAllReplicasOptions
# ... other code ...
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
per_key_opts = {'doc1': GetAllReplicasOptions(timeout=timedelta(seconds=10))}
res = collection.get_all_replicas_multi(keys,
GetAllReplicasMultiOptions(per_key_options=per_key_opts))
for k, docs in res.results.items():
for doc in docs:
if doc.is_replica:
print(f'Replica doc {k} has value: {doc.content_as[dict]}')
else:
print(f'Active doc {k} has value: {doc.content_as[dict]}')
"""
op_args, return_exceptions, transcoders = self._get_multi_op_args(keys,
*opts,
opts_type=GetAllReplicasMultiOptions,
**kwargs)
op_type = operations.GET_ALL_REPLICAS.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
# all the successful results will be streamed_results, so lets
# pop those off the main result dict and re-add the key back
# transformed into a List[GetReplicaResult]
result_keys = []
for k, v in res.raw_result.items():
if k == 'all_okay' or isinstance(v, CouchbaseBaseException):
continue
result_keys.append(k)
for k in result_keys:
value = res.raw_result.pop(k)
tc = transcoders[k]
res.raw_result[k] = list(r for r in decode_replicas(tc, value, GetReplicaResult))
return MultiGetReplicaResult(res, return_exceptions)
def lock_multi(
self,
keys, # type: List[str]
lock_time, # type: timedelta
*opts, # type: LockMultiOptions
**kwargs, # type: Any
) -> MultiGetResult:
"""For each key in the provided list, lock the document associated with the key.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys (List[str]): The keys to use for the multiple lock operations.
lock_time (timedelta): The amount of time to lock the documents.
opts (:class:`~couchbase.options.LockMultiOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.LockMultiOptions`
Returns:
:class:`~couchbase.result.MultiGetResult`: An instance of
:class:`~couchbase.result.MultiGetResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist on the
server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
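Examples:
Illustrative multi-lock sketch (the keys and the ``bucket`` reference are placeholders, as in the examples above)::
from datetime import timedelta
# ... other code ...
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
res = collection.lock_multi(keys, timedelta(seconds=10))
for k, v in res.results.items():
print(f'Locked doc {k} w/ CAS: {v.cas}')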
"""
kwargs["lock_time"] = lock_time
op_args, return_exceptions, transcoders = self._get_multi_op_args(keys,
*opts,
opts_type=LockMultiOptions,
**kwargs)
op_type = operations.GET_AND_LOCK.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
for k, v in res.raw_result.items():
if k == 'all_okay':
continue
if isinstance(v, CouchbaseBaseException):
continue
value = v.raw_result.get('value', None)
flags = v.raw_result.get('flags', None)
tc = transcoders[k]
v.raw_result['value'] = decode_value(tc, value, flags)
return MultiGetResult(res, return_exceptions)
def exists_multi(
self,
keys, # type: List[str]
*opts, # type: ExistsMultiOptions
**kwargs, # type: Any
) -> MultiExistsResult:
"""For each key in the provided list, check if the document associated with the key exists.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys (List[str]): The keys to use for the multiple exists operations.
opts (:class:`~couchbase.options.ExistsMultiOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.ExistsMultiOptions`
Returns:
:class:`~couchbase.result.MultiExistsResult`: An instance of
:class:`~couchbase.result.MultiExistsResult`.
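Examples:
Illustrative multi-exists sketch (the keys and the ``bucket`` reference are placeholders, as in the examples above)::
collection = bucket.default_collection()
keys = ['doc1', 'doc2', 'doc3']
res = collection.exists_multi(keys)
for k, v in res.results.items():
print(f'Doc {k} {"exists" if v.exists else "does not exist"}')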
"""
op_args, return_exceptions, _ = self._get_multi_op_args(keys,
*opts,
opts_type=ExistsMultiOptions,
**kwargs)
op_type = operations.EXISTS.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiExistsResult(res, return_exceptions)
def insert_multi(
self,
keys_and_docs, # type: Dict[str, JSONType]
*opts, # type: InsertMultiOptions
**kwargs, # type: Any
) -> MultiMutationResult:
"""For each key, value pair in the provided dict, inserts a new document to the collection,
failing if the document already exists.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys_and_docs (Dict[str, JSONType]): The keys and values/docs to use for the multiple insert operations.
opts (:class:`~couchbase.options.InsertMultiOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.InsertMultiOptions`
Returns:
:class:`~couchbase.result.MultiMutationResult`: An instance of
:class:`~couchbase.result.MultiMutationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentExistsException`: If the key provided already exists on the
server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
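Examples:
Illustrative multi-insert sketch (the keys and values are placeholders and assume the documents do not already exist; the ``bucket`` reference is as in the examples above)::
collection = bucket.default_collection()
keys_and_docs = {'doc1': {'foo': 'bar'}, 'doc2': {'bar': 'baz'}}
res = collection.insert_multi(keys_and_docs)
for k, v in res.results.items():
print(f'Inserted doc {k} w/ CAS: {v.cas}')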
"""
op_args, return_exceptions = self._get_multi_mutation_transcoded_op_args(keys_and_docs,
*opts,
opts_type=InsertMultiOptions,
**kwargs)
op_type = operations.INSERT.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiMutationResult(res, return_exceptions)
def upsert_multi(
self,
keys_and_docs, # type: Dict[str, JSONType]
*opts, # type: UpsertMultiOptions
**kwargs, # type: Any
) -> MultiMutationResult:
"""For each key, value pair in the provided dict, upserts a document to the collection. This operation
succeeds whether or not the document already exists.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys_and_docs (Dict[str, JSONType]): The keys and values/docs to use for the multiple upsert operations.
opts (:class:`~couchbase.options.UpsertMultiOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.UpsertMultiOptions`
Returns:
:class:`~couchbase.result.MultiMutationResult`: An instance of
:class:`~couchbase.result.MultiMutationResult`.
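Examples:
Illustrative multi-upsert sketch (the keys and values are placeholders; the ``bucket`` reference is as in the examples above)::
collection = bucket.default_collection()
keys_and_docs = {'doc1': {'foo': 'bar'}, 'doc2': {'bar': 'baz'}}
res = collection.upsert_multi(keys_and_docs)
for k, v in res.results.items():
print(f'Upserted doc {k} w/ CAS: {v.cas}')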
"""
op_args, return_exceptions = self._get_multi_mutation_transcoded_op_args(keys_and_docs,
*opts,
opts_type=UpsertMultiOptions,
**kwargs)
op_type = operations.UPSERT.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiMutationResult(res, return_exceptions)
def replace_multi(
self,
keys_and_docs, # type: Dict[str, JSONType]
*opts, # type: ReplaceMultiOptions
**kwargs, # type: Any
) -> MultiMutationResult:
"""For each key, value pair in the provided dict, replaces the value of a document in the collection.
This operation fails if the document does not exist.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys_and_docs (Dict[str, JSONType]): The keys and values/docs to use for the multiple replace operations.
opts (:class:`~couchbase.options.ReplaceMultiOptions`): Optional parameters for this operation.
**kwargs (Dict[str, Any]): keyword arguments that can be used in place or to
override provided :class:`~couchbase.options.ReplaceMultiOptions`
Returns:
:class:`~couchbase.result.MultiMutationResult`: An instance of
:class:`~couchbase.result.MultiMutationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist on the
                server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
"""
op_args, return_exceptions = self._get_multi_mutation_transcoded_op_args(keys_and_docs,
*opts,
opts_type=ReplaceMultiOptions,
**kwargs)
op_type = operations.REPLACE.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiMutationResult(res, return_exceptions)
def remove_multi(
self,
keys, # type: List[str]
*opts, # type: RemoveMultiOptions
**kwargs, # type: Any
) -> MultiMutationResult:
"""For each key in the provided list, remove the existing document. This operation fails
if the document does not exist.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys (List[str]): The keys to use for the multiple remove operations.
opts (:class:`~couchbase.options.RemoveMultiOptions`): Optional parameters for this operation.
            **kwargs (Dict[str, Any]): keyword arguments that can be used in place of or to
override provided :class:`~couchbase.options.RemoveMultiOptions`
Returns:
:class:`~couchbase.result.MultiMutationResult`: An instance of
:class:`~couchbase.result.MultiMutationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist on the
                server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
"""
op_args, return_exceptions, _ = self._get_multi_op_args(keys,
*opts,
opts_type=RemoveMultiOptions,
**kwargs)
op_type = operations.REMOVE.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiMutationResult(res, return_exceptions)
def touch_multi(
self,
keys, # type: List[str]
expiry, # type: timedelta
*opts, # type: TouchMultiOptions
**kwargs, # type: Any
) -> MultiMutationResult:
"""For each key in the provided list, update the expiry on an existing document. This operation fails
if the document does not exist.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys (List[str]): The keys to use for the multiple touch operations.
expiry (timedelta): The new expiry for the document.
opts (:class:`~couchbase.options.TouchMultiOptions`): Optional parameters for this operation.
            **kwargs (Dict[str, Any]): keyword arguments that can be used in place of or to
override provided :class:`~couchbase.options.TouchMultiOptions`
Returns:
:class:`~couchbase.result.MultiMutationResult`: An instance of
:class:`~couchbase.result.MultiMutationResult`.
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist on the
                server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
"""
kwargs['expiry'] = expiry
op_args, return_exceptions, _ = self._get_multi_op_args(keys,
*opts,
opts_type=TouchMultiOptions,
**kwargs)
op_type = operations.TOUCH.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiMutationResult(res, return_exceptions)
def unlock_multi( # noqa: C901
self,
keys, # type: Union[MultiResultType, Dict[str, int]]
*opts, # type: UnlockMultiOptions
**kwargs, # type: Any
) -> Dict[str, Union[None, CouchbaseBaseException]]:
"""For each result in the provided :class:`~couchbase.result.MultiResultType` in the provided list,
unlocks a previously locked document. This operation fails if the document does not exist.
.. note::
This method is part of an **uncommitted** API that is unlikely to change,
but may still change as final consensus on its behavior has not yet been reached.
Args:
keys (Union[MultiResultType, Dict[str, int]]): The result from a previous multi operation.
opts (:class:`~couchbase.options.UnlockMultiOptions`): Optional parameters for this operation.
            **kwargs (Dict[str, Any]): keyword arguments that can be used in place of or to
override provided :class:`~couchbase.options.UnlockMultiOptions`
Returns:
Dict[str, Union[None, CouchbaseBaseException]]: Either None if operation successful or an Exception
if the operation was unsuccessful
Raises:
:class:`~couchbase.exceptions.DocumentNotFoundException`: If the key provided does not exist on the
                server and the return_exceptions option is False. Otherwise the exception is returned as a
match to the key, but is not raised.
:class:`~couchbase.exceptions.DocumentLockedException`: If the provided cas is invalid and the
                return_exceptions option is False. Otherwise the exception is returned as a match to the key,
but is not raised.
"""
op_keys_cas = {}
if isinstance(keys, dict):
if not all(map(lambda k: isinstance(k, str), keys.keys())):
                raise InvalidArgumentException('If providing keys of type dict, all keys must be type str.')
if not all(map(lambda v: isinstance(v, int), keys.values())):
raise InvalidArgumentException('If providing keys of type dict, all values must be type int.')
op_keys_cas = copy(keys)
elif isinstance(keys, (MultiGetResult, MultiMutationResult)):
for k, v in keys.results.items():
op_keys_cas[k] = v.cas
else:
raise InvalidArgumentException(
                'keys type must be Union[MultiGetResult, MultiMutationResult, Dict[str, int]].')
op_args, return_exceptions, _ = self._get_multi_op_args(list(op_keys_cas.keys()),
*opts,
opts_type=UnlockMultiOptions,
**kwargs)
for k, v in op_args.items():
v['cas'] = op_keys_cas[k]
op_type = operations.UNLOCK.value
res = kv_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
output = {}
for k, v in res.raw_result.items():
if k == 'all_okay':
continue
if isinstance(v, CouchbaseBaseException):
if not return_exceptions:
raise ErrorMapper.build_exception(v)
else:
output[k] = ErrorMapper.build_exception(v)
else:
output[k] = None
return output
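    # --- Illustrative usage sketch (not part of the SDK source) ---
    # unlock_multi accepts either a prior multi result or an explicit key -> CAS mapping, as the
    # validation above shows. The `locked_result`, `cas_1` and `cas_2` names are placeholders.
    #
    #   # from an explicit mapping of key -> CAS gathered earlier
    #   collection.unlock_multi({"doc-1": cas_1, "doc-2": cas_2})
    #
    #   # or directly from a MultiGetResult / MultiMutationResult
    #   collection.unlock_multi(locked_result)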
def _get_multi_counter_op_args(
self,
keys, # type: List[str]
*opts, # type: Union[IncrementMultiOptions, DecrementMultiOptions]
**kwargs, # type: Any
) -> Tuple[Dict[str, Any], bool]:
if not isinstance(keys, list):
raise InvalidArgumentException(message='Expected keys to be a list.')
opts_type = kwargs.pop('opts_type', None)
if not opts_type:
raise InvalidArgumentException(message='Expected options type is missing.')
final_args = get_valid_multi_args(opts_type, kwargs, *opts)
global_delta, global_initial = self._get_and_validate_delta_initial(final_args)
final_args['delta'] = int(global_delta)
final_args['initial'] = int(global_initial)
per_key_args = final_args.pop('per_key_options', None)
op_args = {}
for key in keys:
op_args[key] = copy(final_args)
# per key args override global args
if per_key_args and key in per_key_args:
# need to validate delta/initial if provided per key
delta = per_key_args[key].get('delta', None)
initial = per_key_args[key].get('initial', None)
self._validate_delta_initial(delta=delta, initial=initial)
if delta:
per_key_args[key]['delta'] = int(delta)
if initial:
per_key_args[key]['initial'] = int(initial)
op_args[key].update(per_key_args[key])
return_exceptions = final_args.pop('return_exceptions', True)
return op_args, return_exceptions
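    # --- Illustrative sketch (not part of the SDK source) ---
    # The parsing above lets a global delta apply to every key while `per_key_options` overrides
    # individual keys. The exact option construction below is an assumption based on that logic.
    #
    #   opts = IncrementMultiOptions(
    #       delta=DeltaValue(1),
    #       per_key_options={"counter-b": {"delta": DeltaValue(5)}},
    #   )
    #   # "counter-a" would be incremented by 1, "counter-b" by 5.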
def _get_multi_binary_mutation_op_args(
self,
keys_and_docs, # type: Dict[str, Union[str, bytes, bytearray]]
*opts, # type: Union[AppendMultiOptions, PrependMultiOptions]
**kwargs, # type: Any
) -> Tuple[Dict[str, Any], bool]:
if not isinstance(keys_and_docs, dict):
raise InvalidArgumentException(message='Expected keys_and_docs to be a dict.')
opts_type = kwargs.pop('opts_type', None)
if not opts_type:
raise InvalidArgumentException(message='Expected options type is missing.')
parsed_keys_and_docs = {}
for k, v in keys_and_docs.items():
if isinstance(v, str):
value = v.encode("utf-8")
elif isinstance(v, bytearray):
value = bytes(v)
else:
value = v
if not isinstance(value, bytes):
raise ValueError(
"The value provided must of type str, bytes or bytearray.")
parsed_keys_and_docs[k] = value
final_args = get_valid_multi_args(opts_type, kwargs, *opts)
per_key_args = final_args.pop('per_key_options', None)
op_args = {}
for key, value in parsed_keys_and_docs.items():
op_args[key] = copy(final_args)
# per key args override global args
if per_key_args and key in per_key_args:
op_args[key].update(per_key_args[key])
op_args[key]['value'] = value
return_exceptions = final_args.pop('return_exceptions', True)
return op_args, return_exceptions
def _append_multi(
self,
keys_and_values, # type: Dict[str, Union[str,bytes,bytearray]]
*opts, # type: AppendMultiOptions
**kwargs, # type: Dict[str, Any]
) -> MultiMutationResult:
op_args, return_exceptions = self._get_multi_binary_mutation_op_args(keys_and_values,
*opts,
opts_type=AppendMultiOptions,
**kwargs)
op_type = operations.APPEND.value
res = binary_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiMutationResult(res, return_exceptions)
def _prepend_multi(
self,
keys_and_values, # type: Dict[str, Union[str,bytes,bytearray]]
*opts, # type: PrependMultiOptions
**kwargs, # type: Dict[str, Any]
) -> MultiMutationResult:
op_args, return_exceptions = self._get_multi_binary_mutation_op_args(keys_and_values,
*opts,
opts_type=PrependMultiOptions,
**kwargs)
op_type = operations.PREPEND.value
res = binary_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiMutationResult(res, return_exceptions)
def _increment_multi(
self,
keys, # type: List[str]
*opts, # type: IncrementMultiOptions
**kwargs, # type: Dict[str, Any]
) -> MultiCounterResult:
op_args, return_exceptions = self._get_multi_counter_op_args(keys,
*opts,
opts_type=IncrementMultiOptions,
**kwargs)
op_type = operations.INCREMENT.value
res = binary_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiCounterResult(res, return_exceptions)
def _decrement_multi(
self,
keys, # type: List[str]
*opts, # type: DecrementMultiOptions
**kwargs, # type: Dict[str, Any]
) -> MultiCounterResult:
op_args, return_exceptions = self._get_multi_counter_op_args(keys,
*opts,
opts_type=DecrementMultiOptions,
**kwargs)
op_type = operations.DECREMENT.value
res = binary_multi_operation(
**self._get_connection_args(),
op_type=op_type,
op_args=op_args
)
return MultiCounterResult(res, return_exceptions)
def query_indexes(self) -> CollectionQueryIndexManager:
"""
Get a :class:`~couchbase.management.queries.CollectionQueryIndexManager` which can be used to manage the query
indexes of this cluster.
Returns:
:class:`~couchbase.management.queries.CollectionQueryIndexManager`: A :class:`~couchbase.management.queries.CollectionQueryIndexManager` instance.
""" # noqa: E501
return CollectionQueryIndexManager(self.connection, self._scope.bucket_name, self._scope.name, self.name)
@staticmethod
def default_name():
return "_default"
"""
** DEPRECATION NOTICE **
The classes below are deprecated for 3.x compatibility. They should not be used.
Instead use:
* All options should be imported from `couchbase.options`.
* All constrained int classes should be imported from `couchbase.options`.
* Scope object should be imported from `couchbase.scope`.
* Do not use the `CBCollection` class, use Collection instead.
"""
from couchbase.logic.options import AppendOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import DecrementOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import DeltaValueBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import DurabilityOptionBlockBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import ExistsOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import GetAllReplicasOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import GetAndLockOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import GetAndTouchOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import GetAnyReplicaOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import GetOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import IncrementOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import InsertOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import LookupInOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import OptionsTimeoutBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import PrependOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import RemoveOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import ReplaceOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import TouchOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import UnlockOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.options import UpsertOptionsBase # nopep8 # isort:skip # noqa: E402
from couchbase.logic.scope import ScopeLogic # nopep8 # isort:skip # noqa: E402
from couchbase.options import ConstrainedInt # nopep8 # isort:skip # noqa: E402, F401
from couchbase.options import SignedInt64 # nopep8 # isort:skip # noqa: E402, F401
@Supportability.import_deprecated('couchbase.collection', 'couchbase.scope')
class Scope(ScopeLogic):
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class AppendOptions(AppendOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class DecrementOptions(DecrementOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class DeltaValue(DeltaValueBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class DurabilityOptionBlock(DurabilityOptionBlockBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class ExistsOptions(ExistsOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class GetAllReplicasOptions(GetAllReplicasOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class GetAndTouchOptions(GetAndTouchOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class GetAndLockOptions(GetAndLockOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class GetAnyReplicaOptions(GetAnyReplicaOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class GetOptions(GetOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class IncrementOptions(IncrementOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class InsertOptions(InsertOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class LookupInOptions(LookupInOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class OptionsTimeout(OptionsTimeoutBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class PrependOptions(PrependOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class RemoveOptions(RemoveOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class ReplaceOptions(ReplaceOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class TouchOptions(TouchOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class UnlockOptions(UnlockOptionsBase): # noqa: F811
pass
@Supportability.import_deprecated('couchbase.collection', 'couchbase.options') # noqa: F811
class UpsertOptions(UpsertOptionsBase): # noqa: F811
pass
@Supportability.class_deprecated('couchbase.collection.Collection')
class CBCollection(Collection):
pass
|
6877607a7789307c3574f5dd574cc6e28a944b4a
|
0869d7edac80e8aebe951682a2cc311a083eade3
|
/Python/benchmarking/struct_deserialization.py
|
08b8b8fa9553434723ea34aeba2795b4d5319aa1
|
[
"BSD-2-Clause"
] |
permissive
|
threedworld-mit/tdw
|
7d5b4453832647733ff91ad7a7ce7ec2320454c1
|
9df96fba455b327bb360d8dd5886d8754046c690
|
refs/heads/master
| 2023-09-01T11:45:28.132298
| 2023-08-31T16:13:30
| 2023-08-31T16:13:30
| 245,492,977
| 427
| 75
|
BSD-2-Clause
| 2023-09-14T17:36:12
| 2020-03-06T18:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 849
|
py
|
struct_deserialization.py
|
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.add_ons.benchmark import Benchmark
"""
Benchmark the speed of deserializing structs (such as Vector3 and Quaternion).
"""
if __name__ == "__main__":
o_id = 0
cmds = [{"$type": "teleport_object",
"position": {"x": 0, "y": 0, "z": 0},
"id": o_id},
{"$type": "rotate_object_to",
"rotation": {"w": 1, "x": 0, "y": 0, "z": 0},
"id": o_id}]
c = Controller(launch_build=False)
b = Benchmark()
c.add_ons.append(b)
c.communicate([TDWUtils.create_empty_room(12, 12),
c.get_add_object("rh10", object_id=o_id)])
b.start()
for i in range(5000):
c.communicate(cmds)
b.stop()
print(f"FPS: {round(b.fps)}")
c.communicate({"$type": "terminate"})
|
661c77747485dc2e72dc55f04d7b9aa2ea0909db
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/cognitiveservices/custom.py
|
83ddd28c1a77a5ba5ba45f3439cfb38e689c99dd
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 11,350
|
py
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
from knack.util import CLIError
from knack.log import get_logger
from azure.mgmt.cognitiveservices.models import Account as CognitiveServicesAccount, Sku, \
VirtualNetworkRule, IpRule, NetworkRuleSet, NetworkRuleAction, \
AccountProperties as CognitiveServicesAccountProperties, ApiProperties as CognitiveServicesAccountApiProperties, \
Identity, ResourceIdentityType as IdentityType, \
Deployment, DeploymentModel, DeploymentScaleSettings, DeploymentProperties, \
CommitmentPlan, CommitmentPlanProperties, CommitmentPeriod
from azure.cli.command_modules.cognitiveservices._client_factory import cf_accounts, cf_resource_skus
logger = get_logger(__name__)
def list_resources(client, resource_group_name=None):
"""
List all Azure Cognitive Services accounts.
"""
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def recover(client, location, resource_group_name, account_name):
"""
Recover a deleted Azure Cognitive Services account.
"""
properties = CognitiveServicesAccountProperties()
properties.restore = True
params = CognitiveServicesAccount(properties=properties)
params.location = location
return client.begin_create(resource_group_name, account_name, params)
def list_usages(client, resource_group_name, account_name):
"""
List usages for Azure Cognitive Services account.
"""
return client.list_usages(resource_group_name, account_name).value
def list_kinds(client):
"""
List all valid kinds for Azure Cognitive Services account.
:param client: the ResourceSkusOperations
    :return: a sorted list of unique kind names
"""
# The client should be ResourceSkusOperations, and list() should return a list of SKUs for all regions.
# The sku will have "kind" and we use that to extract full list of kinds.
kinds = {x.kind for x in client.list()}
return sorted(list(kinds))
def list_skus(cmd, kind=None, location=None, resource_group_name=None, account_name=None):
"""
List skus for Azure Cognitive Services account.
"""
if resource_group_name is not None or account_name is not None:
logger.warning(
'list-skus with an existing account has been deprecated and will be removed in a future release.')
if resource_group_name is None:
# account_name must not be None
raise CLIError('--resource-group is required when --name is specified.')
# keep the original behavior to avoid breaking changes
return cf_accounts(cmd.cli_ctx).list_skus(resource_group_name, account_name)
# in other cases, use kind and location to filter SKUs
def _filter_sku(_sku):
if kind is not None:
if _sku.kind != kind:
return False
if location is not None:
if location.lower() not in [x.lower() for x in _sku.locations]:
return False
return True
return [x for x in cf_resource_skus(cmd.cli_ctx).list() if _filter_sku(x)]
def create(
client, resource_group_name, account_name, sku_name, kind, location, custom_domain=None,
tags=None, api_properties=None, assign_identity=False, storage=None, encryption=None,
yes=None): # pylint: disable=unused-argument
"""
Create an Azure Cognitive Services account.
"""
sku = Sku(name=sku_name)
properties = CognitiveServicesAccountProperties()
if api_properties is not None:
api_properties = CognitiveServicesAccountApiProperties.deserialize(api_properties)
properties.api_properties = api_properties
if custom_domain:
properties.custom_sub_domain_name = custom_domain
params = CognitiveServicesAccount(sku=sku, kind=kind, location=location,
properties=properties, tags=tags)
if assign_identity:
params.identity = Identity(type=IdentityType.system_assigned)
if storage is not None:
params.properties.user_owned_storage = json.loads(storage)
if encryption is not None:
params.properties.encryption = json.loads(encryption)
return client.begin_create(resource_group_name, account_name, params)
def update(client, resource_group_name, account_name, sku_name=None, custom_domain=None,
tags=None, api_properties=None, storage=None, encryption=None):
"""
Update an Azure Cognitive Services account.
"""
if sku_name is None:
sa = client.get(resource_group_name, account_name)
sku_name = sa.sku.name
sku = Sku(name=sku_name)
properties = CognitiveServicesAccountProperties()
if api_properties is not None:
api_properties = CognitiveServicesAccountApiProperties.deserialize(api_properties)
properties.api_properties = api_properties
if custom_domain:
properties.custom_sub_domain_name = custom_domain
params = CognitiveServicesAccount(sku=sku, properties=properties, tags=tags)
if storage is not None:
params.properties.user_owned_storage = json.loads(storage)
if encryption is not None:
params.properties.encryption = json.loads(encryption)
return client.begin_update(resource_group_name, account_name, params)
def default_network_acls():
rules = NetworkRuleSet()
rules.default_action = NetworkRuleAction.deny
rules.ip_rules = []
rules.virtual_network_rules = []
return rules
def list_network_rules(client, resource_group_name, account_name):
"""
List network rules for Azure Cognitive Services account.
"""
sa = client.get(resource_group_name, account_name)
rules = sa.properties.network_acls
if rules is None:
rules = default_network_acls()
return rules
def add_network_rule(client, resource_group_name, account_name, subnet=None,
vnet_name=None, ip_address=None): # pylint: disable=unused-argument
"""
Add a network rule for Azure Cognitive Services account.
"""
sa = client.get(resource_group_name, account_name)
rules = sa.properties.network_acls
if rules is None:
rules = default_network_acls()
if subnet:
from msrestazure.tools import is_valid_resource_id
if not is_valid_resource_id(subnet):
raise CLIError("Expected fully qualified resource ID: got '{}'".format(subnet))
if not rules.virtual_network_rules:
rules.virtual_network_rules = []
rules.virtual_network_rules.append(VirtualNetworkRule(id=subnet, ignore_missing_vnet_service_endpoint=True))
if ip_address:
if not rules.ip_rules:
rules.ip_rules = []
rules.ip_rules.append(IpRule(value=ip_address))
properties = CognitiveServicesAccountProperties()
properties.network_acls = rules
params = CognitiveServicesAccount(properties=properties)
return client.begin_update(resource_group_name, account_name, params)
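# Illustrative sketch (not part of the module): starting from an account with no existing ACLs,
# add_network_rule builds a deny-by-default rule set. The subnet ID and CIDR below are placeholders.
#
#   add_network_rule(client, "my-rg", "my-account",
#                    subnet="/subscriptions/<sub>/resourceGroups/my-rg/providers/Microsoft.Network"
#                           "/virtualNetworks/vnet1/subnets/default",
#                    ip_address="13.0.0.0/24")
#   # resulting network_acls:
#   #   default_action == NetworkRuleAction.deny
#   #   virtual_network_rules == [VirtualNetworkRule(id=<subnet>, ignore_missing_vnet_service_endpoint=True)]
#   #   ip_rules == [IpRule(value="13.0.0.0/24")]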
def remove_network_rule(client, resource_group_name, account_name, ip_address=None, subnet=None,
vnet_name=None): # pylint: disable=unused-argument
"""
Remove a network rule for Azure Cognitive Services account.
"""
sa = client.get(resource_group_name, account_name)
rules = sa.properties.network_acls
if rules is None:
# nothing to update, but return the object
return client.update(resource_group_name, account_name)
if subnet:
rules.virtual_network_rules = [x for x in rules.virtual_network_rules
if not x.id.endswith(subnet)]
if ip_address:
rules.ip_rules = [x for x in rules.ip_rules if x.value != ip_address]
properties = CognitiveServicesAccountProperties()
properties.network_acls = rules
params = CognitiveServicesAccount(properties=properties)
return client.begin_update(resource_group_name, account_name, params)
def identity_assign(client, resource_group_name, account_name):
"""
Assign the identity for Azure Cognitive Services account.
"""
params = CognitiveServicesAccount()
params.identity = Identity(type=IdentityType.system_assigned)
sa = client.begin_update(resource_group_name, account_name, params).result()
return sa.identity if sa.identity else {}
def identity_remove(client, resource_group_name, account_name):
"""
Remove the identity for Azure Cognitive Services account.
"""
params = CognitiveServicesAccount()
params.identity = Identity(type=IdentityType.none)
return client.begin_update(resource_group_name, account_name, params)
def identity_show(client, resource_group_name, account_name):
"""
Show the identity for Azure Cognitive Services account.
"""
sa = client.get(resource_group_name, account_name)
return sa.identity if sa.identity else {}
def deployment_begin_create_or_update(
client, resource_group_name, account_name, deployment_name,
model_format, model_name, model_version,
sku_name=None, sku_capacity=None,
scale_settings_scale_type=None, scale_settings_capacity=None):
"""
Create a deployment for Azure Cognitive Services account.
"""
dpy = Deployment()
dpy.properties = DeploymentProperties()
dpy.properties.model = DeploymentModel()
dpy.properties.model.format = model_format
dpy.properties.model.name = model_name
dpy.properties.model.version = model_version
if sku_name is not None:
dpy.sku = Sku(name=sku_name)
dpy.sku.capacity = sku_capacity
if scale_settings_scale_type is not None:
dpy.properties.scale_settings = DeploymentScaleSettings()
dpy.properties.scale_settings.scale_type = scale_settings_scale_type
dpy.properties.scale_settings.capacity = scale_settings_capacity
return client.begin_create_or_update(resource_group_name, account_name, deployment_name, dpy, polling=False)
def commitment_plan_create_or_update(
client, resource_group_name, account_name, commitment_plan_name,
hosting_model, plan_type, auto_renew,
current_tier=None, current_count=None,
next_tier=None, next_count=None):
"""
Create a commitment plan for Azure Cognitive Services account.
"""
plan = CommitmentPlan()
plan.properties = CommitmentPlanProperties()
plan.properties.hosting_model = hosting_model
plan.properties.plan_type = plan_type
if (current_tier is not None or current_count is not None):
plan.properties.current = CommitmentPeriod()
plan.properties.current.tier = current_tier
plan.properties.current.count = current_count
if (next_tier is not None or next_count is not None):
plan.properties.next = CommitmentPeriod()
plan.properties.next.tier = next_tier
plan.properties.next.count = next_count
plan.properties.auto_renew = auto_renew
return client.create_or_update(resource_group_name, account_name, commitment_plan_name, plan)
|
61ccc578ba7e38fd1b8ca0180d8f757b1ae789b4
|
62179a165ec620ba967dbc20016e890978fbff50
|
/nncf/quantization/algorithms/hyperparameter_tuner/algorithm.py
|
fba7b984278c580802d58535049af14adaa67b76
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 15,350
|
py
|
algorithm.py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import dataclasses
import functools
import itertools
import operator
from typing import Any, Callable, Dict, Iterable, List, Tuple, Type, TypeVar, Union
from nncf.common.factory import NNCFGraphFactory
from nncf.common.factory import StatisticsAggregatorFactory
from nncf.common.graph.graph import NNCFGraph
from nncf.common.logging import nncf_logger
from nncf.common.utils.backend import get_backend
from nncf.common.utils.timer import timer
from nncf.data.dataset import Dataset
from nncf.quantization.algorithms.accuracy_control.evaluator import Evaluator
from nncf.quantization.algorithms.accuracy_control.evaluator import MetricResults
from nncf.quantization.algorithms.accuracy_control.rank_functions import create_normalized_mse_func
from nncf.quantization.algorithms.accuracy_control.subset_selection import select_subset
from nncf.quantization.algorithms.algorithm import Algorithm
TModel = TypeVar("TModel")
TTensor = TypeVar("TTensor")
CombinationKey = Tuple[int, ...]
Combination = Dict[str, Any]
def create_combinations(param_grid: Dict[str, List[Any]]) -> Dict[CombinationKey, Combination]:
"""
Creates combinations as follows:
* All keys in `param_grid` are numbered using integers from 0 to N = len(param_grid)-1
      The order of the keys is used. Let key_j be the key from param_grid.keys() that corresponds
      to integer j in {0, 1, ..., N}.
    * The set of combination keys (CK) is created as the Cartesian product of the following sets
        CK = {None, 0, 1, ..., num_val_0 - 1} x {None, 0, 1, ..., num_val_1 - 1} x ... x {None, 0, 1, ..., num_val_N - 1},
      where num_val_j is the number of values in param_grid[key_j].
    * A combination is created for each combination key. If combination_key[i] is None, the parameter
      named key_i is not changed. Otherwise, the param_grid[key_i][combination_key[i]] value is included
      in the combination as the new value for the parameter named key_i.
    :param param_grid: Dictionary with parameter names as keys and lists of
        parameter settings to try as values.
    :return: Created combinations, keyed by combination key.
"""
simple_changes = []
indices = []
for param_name, values in param_grid.items():
indices.append([None, *range(len(values))])
simple_changes.append([{param_name: v} for v in values])
combinations: Dict[CombinationKey, Combination] = {}
for combination_key in itertools.product(*indices):
combination: Combination = {}
for param_idx, value_idx in enumerate(combination_key):
if value_idx is None:
continue
combination.update(simple_changes[param_idx][value_idx])
combinations[combination_key] = combination
return combinations
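# Worked example (illustrative, not part of the module): for
#   param_grid = {"a": [0.1, 0.2], "b": [True]}
# the combination keys form the Cartesian product {None, 0, 1} x {None, 0}, giving six combinations:
#   (None, None) -> {}                 (None, 0) -> {"b": True}
#   (0, None)    -> {"a": 0.1}         (0, 0)    -> {"a": 0.1, "b": True}
#   (1, None)    -> {"a": 0.2}         (1, 0)    -> {"a": 0.2, "b": True}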
def is_dataclass_instance(obj: Any) -> bool:
"""
Returns `True` if object is a dataclass instance, `False` otherwise.
:param obj: Object to check.
:return: `True` if object is a dataclass instance, `False` otherwise.
"""
return dataclasses.is_dataclass(obj) and not isinstance(obj, type)
def apply_combination(init_params: Dict[str, Any], combination: Combination) -> Dict[str, Any]:
"""
Applies combination of parameters to initial parameters.
:param init_params: Initial set of parameters.
:param combination: Combination of parameters.
    :return: A copy of `init_params` in which some parameter values are changed according to the
        provided combination.
"""
DELIMITER = ":"
params = copy.deepcopy(init_params)
for param_key, param_value in combination.items():
if DELIMITER in param_key:
main_key, *path_to_attr, attr_name = param_key.split(DELIMITER)
obj = params[main_key]
assert is_dataclass_instance(obj)
for name in path_to_attr:
obj = getattr(obj, name)
assert is_dataclass_instance(obj)
setattr(obj, attr_name, param_value)
else:
params[param_key] = param_value
return params
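# Worked example (illustrative, not part of the module): the ":" delimiter walks attribute paths of
# dataclass-valued parameters before setting the final field. `AdvancedParams` and `Smooth` below
# are hypothetical dataclasses used only for illustration.
#
#   init_params = {"subset_size": 300, "advanced": AdvancedParams(smooth=Smooth(alpha=0.9))}
#   combination = {"subset_size": 100, "advanced:smooth:alpha": 0.5}
#   new_params = apply_combination(init_params, combination)
#   # new_params["subset_size"] == 100
#   # new_params["advanced"].smooth.alpha == 0.5
#   # init_params is left untouched because it is deep-copied first.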
def print_combination_and_score(title: str, combination: Combination, combination_score: float) -> None:
"""
Prints combination and score.
:param title: Title.
:param combination: Combination to print.
:param combination_score: Score of combination.
"""
if not combination:
message = "Parameters were not changed"
else:
message = ", ".join(f"{name} = {v}" for name, v in combination.items())
message = f"{title} {message}"
nncf_logger.info(message)
nncf_logger.info(f"Score: {combination_score}")
def find_best_combination(
combinations: Dict[CombinationKey, Combination],
combination_score_func: Callable[[CombinationKey], float],
param_grid: Dict[str, List[Any]],
) -> CombinationKey:
"""
Finds best combination.
:param combinations: Combinations.
:param combination_score_func: Combination score function.
:param param_grid: Dictionary with parameters names as keys and list of
parameter settings to try as values.
:return: Best combination key.
"""
best_combination_key = tuple(None for _ in param_grid)
best_combination_score = None
for param_idx, (param_name, values) in enumerate(param_grid.items()):
nncf_logger.info(f"Start search best value for the '{param_name}' parameter")
values_indices = [None, *range(len(values))]
param_best_combination_key = None
param_best_combination_score = None
for value_idx in values_indices:
combination_key = (*best_combination_key[:param_idx], value_idx, *best_combination_key[param_idx + 1 :])
combination_score = combination_score_func(combination_key)
if param_best_combination_score is None or param_best_combination_score < combination_score:
param_best_combination_score = combination_score
param_best_combination_key = combination_key
print_combination_and_score(
"Current combination of parameters:", combinations[combination_key], combination_score
)
if best_combination_score is None or best_combination_score <= param_best_combination_score:
best_combination_score = param_best_combination_score
best_combination_key = param_best_combination_key
print_combination_and_score(
"Best combination of parameters:", combinations[best_combination_key], best_combination_score
)
return best_combination_key
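# Note (illustrative, not part of the module): the search above is greedy and coordinate-wise. For a
# grid with value counts (3, 3, 2), as in the HyperparameterTuner docstring below, at most
# (3 + 1) + (3 + 1) + (2 + 1) = 11 score evaluations are requested (each parameter's values plus the
# "unchanged" None index), versus the 4 * 4 * 3 = 48 combinations an exhaustive grid search would
# score; the tuner's per-combination score cache further removes repeated keys.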
class HyperparameterTuner:
"""
    This algorithm is used to find the best combination of parameters from `param_grid`.
    In the simple case, `param_grid` is a dictionary with parameter names
    as keys and lists of parameter settings to try as values.
param_grid = {
"param_name": [0.1, 0.2],
}
    The parameter names should be the same as in the `algorithm_cls.__init__()` method.
    When the "param_name" parameter is a dataclass object, the settings to try
    for its fields can be specified using the ":" marker
param_grid = {
"param_name:field_a": [10, 20],
"param_name:field_b:x": [0.1, 0.2],
}
In the example above the `param_name` and "param_name:field_b" parameters are dataclasses.
This rule is applied recursively.
    The algorithm works as follows: suppose we have the following `param_grid`
param_grid = {
"param_name_0" : [0.2, 0.4, 0.6],
"param_name_1:x": [-1, -2, -3],
"param_name_2": [True, False],
}
    First, the algorithm finds the best value for the "param_name_0" parameter.
    Then, taking the found value into account, the best value for the "param_name_1:x" parameter
    is sought. Finally, taking the found values for the "param_name_0" and "param_name_1:x"
    parameters into account, the best value for "param_name_2" is sought.
"""
def __init__(
self,
algorithm_cls: Type[Algorithm],
init_params: Dict[str, Any],
param_grid: Dict[str, List[Any]],
calibration_dataset: Dataset,
validation_fn: Callable[[Any, Iterable[Any]], Tuple[float, Union[None, List[float], List[List[TTensor]]]]],
subset_size: int,
initial_metric_results: MetricResults,
quantized_metric_results: MetricResults,
):
"""
:param algorithm_cls: Class of algorithm.
:param init_params: Initial set of parameters used to create algorithm.
:param param_grid: Dictionary with parameters names as keys and list of
parameter settings to try as values.
:param calibration_dataset: Dataset used to collect statistics for algorithm.
        :param validation_fn: Validation function used to validate the model.
:param subset_size: Number of data items that should be selected
from the dataset and used to validate model.
:param initial_metric_results: Metric results for initial model.
:param quantized_metric_results: Metric results for quantized with `init_params` model.
"""
self._algorithm_cls = algorithm_cls
self._init_params = init_params
self._param_grid = param_grid
self._calibration_dataset = calibration_dataset
self._evaluator = Evaluator(validation_fn)
self._subset_size = subset_size
self._initial_metric_results = initial_metric_results
self._quantized_metric_results = quantized_metric_results
self._is_metric_mode = isinstance(self._initial_metric_results.values_for_each_item[0], float)
        # Will be initialized inside the `apply()` method
self._error_fn = None
# Will be initialized inside `_initialize_algorithms()` method
self._algorithms: Dict[CombinationKey, Algorithm] = {}
self._statistic_points = None
self._calculated_scores: Dict[CombinationKey, float] = {}
def apply(self, model: TModel, validation_dataset: Dataset) -> TModel:
"""
Applies algorithm to input model.
:param model: Input model.
:param validation_dataset: Dataset used to validate resulted model.
:return: Resulted model.
"""
if self._is_metric_mode:
self._error_fn = operator.sub
else:
self._error_fn = create_normalized_mse_func(get_backend(model))
subset_indices = select_subset(
self._subset_size,
self._initial_metric_results.values_for_each_item,
self._quantized_metric_results.values_for_each_item,
self._error_fn,
)
combinations = create_combinations(self._param_grid)
initial_graph = NNCFGraphFactory.create(model)
nncf_logger.info("Start initialization of algorithms")
with timer():
self._prepare_algorithms(model, initial_graph, combinations)
combination_score_fn = functools.partial(
self._calculate_combination_score,
initial_model=model,
initial_graph=initial_graph,
dataset=validation_dataset,
subset_indices=subset_indices,
)
nncf_logger.info("Start search best combination of parameters")
with timer():
best_combination_key = find_best_combination(combinations, combination_score_fn, self._param_grid)
algorithm = self._algorithms[best_combination_key]
result_model = algorithm.apply(model, initial_graph, self._statistic_points)
return result_model
def _prepare_algorithms(
self, initial_model: TModel, initial_graph: NNCFGraph, combinations: Dict[CombinationKey, Combination]
) -> None:
"""
Creates algorithm for each combination of parameters. Collects statistics for
created algorithms.
:param initial_model: Input model used to collect statistics for algorithms.
        :param initial_graph: NNCFGraph built for the initial model.
        :param combinations: Combinations of parameters.
"""
for combination_key, combination in combinations.items():
kwargs = apply_combination(self._init_params, combination)
self._algorithms[combination_key] = self._algorithm_cls(**kwargs)
# Collect required statistics for created algorithms
stats_aggregator = StatisticsAggregatorFactory.create(initial_model, self._calibration_dataset)
for algorithm in self._algorithms.values():
statistic_points = algorithm.get_statistic_points(initial_model, initial_graph)
stats_aggregator.register_statistic_points(statistic_points)
stats_aggregator.collect_statistics(initial_model, initial_graph)
self._statistic_points = stats_aggregator.statistic_points
def _calculate_combination_score(
self,
combination_key: CombinationKey,
initial_model: TModel,
initial_graph: NNCFGraph,
dataset: Dataset,
subset_indices: List[int],
) -> float:
"""
Calculates score for provided combination.
:param combination_key: Combination key.
        :param initial_model: Input model.
        :param initial_graph: NNCFGraph built for the initial model.
:param dataset: Dataset used to select data items for validation.
:param subset_indices: Zero-based indices of data items that should be selected
from the dataset and used to validate model.
:return: Calculated score.
"""
if combination_key in self._calculated_scores:
return self._calculated_scores[combination_key]
algorithm = self._algorithms[combination_key]
model = algorithm.apply(initial_model, initial_graph, self._statistic_points)
score = self._validate_model(model, dataset, subset_indices)
self._calculated_scores[combination_key] = score
return score
def _validate_model(self, model: TModel, dataset: Dataset, subset_indices: List[int]) -> float:
"""
Validates input model on subset.
:param model: Input model.
:param dataset: Dataset used to select data items for validation.
:param subset_indices: Zero-based indices of data items that should be selected
from the dataset and used to validate model.
:return: Calculated metric.
"""
if self._is_metric_mode:
metric_value, _ = self._evaluator.validate(model, dataset, subset_indices)
else:
approximate_outputs = self._evaluator.collect_values_for_each_item(model, dataset, subset_indices)
reference_outputs = [self._initial_metric_results.values_for_each_item[i] for i in subset_indices]
errors = [self._error_fn(a, b) for a, b in zip(reference_outputs, approximate_outputs)]
metric_value = sum(errors) / len(errors)
return metric_value
|
29fb0f392a94b44499f146b7cc89f67a22b26d51
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/webpubsub/azure-messaging-webpubsubservice/tests/disable_test_smoke_async.py
|
d7e660ea790a7b0e49d3f5b1629c193e082f3d52
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,697
|
py
|
disable_test_smoke_async.py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from testcase import WebpubsubPowerShellPreparer
from testcase_async import WebpubsubAsyncTest
class WebpubsubSmokeAsyncTest(WebpubsubAsyncTest):
@WebpubsubPowerShellPreparer()
async def test_webpubsub_send_to_all(self, webpubsub_endpoint):
client = self.create_client(endpoint=webpubsub_endpoint, hub='hub')
await client.send_to_all({'hello': 'test_webpubsub_send_to_all'})
@WebpubsubPowerShellPreparer()
async def test_webpubsub_send_to_all_apim_proxy(self, webpubsub_endpoint, webpubsub_reverse_proxy_endpoint=None):
client = self.create_client(endpoint=webpubsub_endpoint, hub='hub', reverse_proxy_endpoint=webpubsub_reverse_proxy_endpoint)
await client.send_to_all({'hello': 'test_webpubsub_send_to_all_apim_proxy'})
@WebpubsubPowerShellPreparer()
async def test_get_client_access_token(self, webpubsub_endpoint):
client = self.create_client(endpoint=webpubsub_endpoint, hub='hub')
access_token = await client.get_client_access_token()
assert len(access_token) == 3
assert access_token['baseUrl'][:3] == "wss"
assert access_token['token']
assert access_token['url'][:3] == "wss"
@WebpubsubPowerShellPreparer()
async def test_hello_world_with_connection_string(self, webpubsub_connection_string):
client = self.create_client(connection_string=webpubsub_connection_string, hub="hub")
await client.send_to_all(message="Hello, World!", content_type="text/plain")
@WebpubsubPowerShellPreparer()
async def test_hello_world_with_connection_string_json(self, webpubsub_connection_string):
client = self.create_client(connection_string=webpubsub_connection_string, hub="hub")
await client.send_to_all(message={"hello": "world!"})
@WebpubsubPowerShellPreparer()
async def test_hello_world_with_connection_string_binary(self, webpubsub_connection_string):
client = self.create_client(connection_string=webpubsub_connection_string, hub="hub")
await client.send_to_all(message=b"Hello, World!", content_type="application/octet-stream")
@WebpubsubPowerShellPreparer()
async def test_no_users_groups(self, webpubsub_connection_string):
client = self.create_client(connection_string=webpubsub_connection_string, hub="hub")
assert not await client.user_exists(user_id="fake user")
assert not await client.group_exists(group="fake group")
@WebpubsubPowerShellPreparer()
async def test_remove_connection_from_all_groups(self, webpubsub_connection_string):
client = self.create_client(connection_string=webpubsub_connection_string, hub="hub")
await client.remove_connection_from_all_groups(connection_id="fake connection id")
@WebpubsubPowerShellPreparer()
async def test_send_with_filter(self, webpubsub_connection_string):
client = self.create_client(connection_string=webpubsub_connection_string, hub="hub")
await client.send_to_all(message={"hello": "world!"}, filter="userId ne 'user1'", content_type="text/plain")
@WebpubsubPowerShellPreparer()
async def test_get_client_access_key_with_groups(self, webpubsub_connection_string):
client = self.create_client(connection_string=webpubsub_connection_string, hub="hub")
await client.get_client_access_token(user_id="user1", groups=["groups1"])
|